diff --git a/go.mod b/go.mod
index 33dde1816c..10d75aedd6 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
 	github.com/go-logr/logr v1.4.2
 	github.com/go-openapi/loads v0.21.2
 	github.com/google/go-github/v53 v53.2.1-0.20230815134205-bb00f570d301
-	github.com/gorilla/websocket v1.5.1
+	github.com/gorilla/websocket v1.5.3
 	github.com/hashicorp/go-hclog v0.14.1
 	github.com/hashicorp/go-plugin v1.6.0
 	github.com/hashicorp/golang-lru/v2 v2.0.2
@@ -27,42 +27,42 @@ require (
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/moby/term v0.5.0
 	github.com/olekukonko/tablewriter v0.0.5
-	github.com/onsi/ginkgo/v2 v2.17.2
+	github.com/onsi/ginkgo/v2 v2.19.0
 	github.com/onsi/gomega v1.33.1
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_model v0.6.1
-	github.com/prometheus/common v0.46.0
+	github.com/prometheus/common v0.60.0
 	github.com/rhysd/go-github-selfupdate v1.2.3
 	github.com/samber/lo v1.38.1
 	github.com/sirupsen/logrus v1.9.3
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
-	github.com/spf13/cobra v1.8.0
+	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5
 	github.com/vmware-labs/yaml-jsonpath v0.3.2
 	github.com/wk8/go-ordered-map/v2 v2.1.8
 	go.uber.org/atomic v1.11.0
-	golang.org/x/mod v0.18.0
-	golang.org/x/sync v0.7.0
-	google.golang.org/grpc v1.64.0
-	google.golang.org/protobuf v1.34.1
+	golang.org/x/mod v0.21.0
+	golang.org/x/sync v0.8.0
+	google.golang.org/grpc v1.67.1
+	google.golang.org/protobuf v1.34.2
 	gopkg.in/yaml.v3 v3.0.1
 	gotest.tools v2.2.0+incompatible
 	gotest.tools/v3 v3.5.1
-	k8s.io/api v0.30.2
-	k8s.io/apiextensions-apiserver v0.30.2
-	k8s.io/apimachinery v0.30.2
-	k8s.io/apiserver v0.30.2
-	k8s.io/cli-runtime v0.30.2
-	k8s.io/client-go v0.30.2
-	k8s.io/component-helpers v0.30.2
-	k8s.io/klog/v2 v2.120.1
-	k8s.io/kube-aggregator v0.30.2
-	k8s.io/kubectl v0.30.2
-	k8s.io/kubelet v0.30.2
-	k8s.io/metrics v0.30.2
-	k8s.io/pod-security-admission v0.30.2
-	k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0
-	sigs.k8s.io/controller-runtime v0.18.4
+	k8s.io/api v0.31.1
+	k8s.io/apiextensions-apiserver v0.31.1
+	k8s.io/apimachinery v0.31.1
+	k8s.io/apiserver v0.31.1
+	k8s.io/cli-runtime v0.31.1
+	k8s.io/client-go v0.31.1
+	k8s.io/component-helpers v0.31.1
+	k8s.io/klog/v2 v2.130.1
+	k8s.io/kube-aggregator v0.31.1
+	k8s.io/kubectl v0.31.1
+	k8s.io/kubelet v0.31.1
+	k8s.io/metrics v0.31.1
+	k8s.io/pod-security-admission v0.31.1
+	k8s.io/utils v0.0.0-20240921022957-49e7df575cb6
+	sigs.k8s.io/controller-runtime v0.19.0
 	sigs.k8s.io/yaml v1.4.0
 )
 
@@ -70,27 +70,30 @@ require (
 	github.com/AlecAivazis/survey/v2 v2.3.7 // indirect
 	github.com/Sytten/logrus-zap-hook v0.1.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
-	github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
+	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
 	github.com/bahlo/generic-list-go v0.2.0 // indirect
 	github.com/buger/jsonparser v1.1.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cloudflare/circl v1.3.7 // indirect
-	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
 	github.com/creack/pty v1.1.21 // indirect
 	github.com/docker/docker-credential-helpers v0.8.1 // indirect
 	github.com/dprotaso/go-yit v0.0.0-20191028211022-135eb7262960 // indirect
-	github.com/emicklei/go-restful/v3 v3.11.2 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
 	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/fatih/camelcase v1.0.0 // indirect github.com/fatih/color v1.15.0 // indirect github.com/frankban/quicktest v1.14.5 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/google/cel-go v0.17.8 // indirect + github.com/google/cel-go v0.21.0 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/yamux v0.1.1 // indirect github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 // indirect + github.com/klauspost/compress v1.17.10 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/loft-sh/apiserver v0.0.0-20240607231110-634aeeab2b36 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect @@ -102,11 +105,13 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect - k8s.io/kms v0.30.2 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + k8s.io/kms v0.31.1 // indirect mvdan.cc/sh/v3 v3.6.0 // indirect ) @@ -126,15 +131,14 @@ require ( github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fvbommel/sortorder v1.1.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.2 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect - github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/spec v0.20.6 // indirect github.com/go-openapi/strfmt v0.21.3 // indirect - github.com/go-openapi/swag v0.22.7 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -143,7 +147,7 @@ require ( github.com/google/go-github/v30 v30.1.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect + github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect 
@@ -162,50 +166,50 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/prometheus/client_golang v1.18.0 // indirect + github.com/prometheus/client_golang v1.20.4 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/tcnksm/go-gitconfig v0.1.2 // indirect github.com/ulikunitz/xz v0.5.11 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.14 - go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect - go.etcd.io/etcd/client/v3 v3.5.14 + go.etcd.io/etcd/api/v3 v3.5.16 + go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect + go.etcd.io/etcd/client/v3 v3.5.16 go.mongodb.org/mongo-driver v1.10.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect - go.opentelemetry.io/proto/otlp v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect + go.opentelemetry.io/otel v1.30.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/otel/sdk v1.30.0 // indirect + go.opentelemetry.io/otel/trace v1.30.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 - golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/term v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.22.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6 + golang.org/x/net v0.30.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/term v0.25.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 - k8s.io/component-base v0.30.2 // indirect - k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect + k8s.io/component-base v0.31.1 // indirect + k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect + 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect - sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/api v0.17.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 6028b5bda9..d81b1ab87c 100644 --- a/go.sum +++ b/go.sum @@ -45,8 +45,8 @@ github.com/Sytten/logrus-zap-hook v0.1.0 h1:GPsDlO0b+rvfb6WohFNreI3Fe2I6MDyv1afo github.com/Sytten/logrus-zap-hook v0.1.0/go.mod h1:J0ktevklw/xJNpI2FzfTdJssk4P0vq3K2qzwihJ2gWU= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= @@ -83,8 +83,8 @@ github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= @@ -111,8 +111,8 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= -github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod 
h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -135,8 +135,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= -github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -160,14 +160,14 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= -github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= @@ -181,8 +181,8 @@ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= 
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8= -github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -218,8 +218,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= -github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.21.0 h1:cl6uW/gxN+Hy50tNYvI691+sXxioCnstFzLp2WO4GCI= +github.com/google/cel-go v0.21.0/go.mod h1:rHUlWCcBKgyEk+eV03RPdZUekPp6YcJwV0FxuUksYxc= github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -247,8 +247,8 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -258,9 +258,8 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket 
v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= @@ -269,8 +268,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= @@ -314,6 +313,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -326,8 +327,12 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/loft-sh/admin-apis v0.0.0-20240203010124-3600c1c582a8 
h1:nuY9Vgvabh2FlaTYp9yhzh3cBzY6jjFEFSsOvw9ZAJw= github.com/loft-sh/admin-apis v0.0.0-20240203010124-3600c1c582a8/go.mod h1:MWczNwKvWssHo1KaeZKaWDdRLYSNbWqQBGsTLoCNd7U= github.com/loft-sh/agentapi/v4 v4.0.0-alpha.6.0.20240614131646-3359da6a4818 h1:NysDbuR9OBb0sl3BvjD+6J+wlupSrydjSnG6egS8JQ4= @@ -374,8 +379,8 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -410,8 +415,8 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= -github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -432,13 +437,13 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= -github.com/prometheus/common v0.46.0/go.mod 
h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rhysd/go-github-selfupdate v1.2.3 h1:iaa+J202f+Nc+A8zi75uccC8Wg3omaM7HDeimXA22Ag= @@ -465,8 +470,8 @@ github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:s github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -502,6 +507,8 @@ github.com/vmware-labs/yaml-jsonpath v0.3.2 h1:/5QKeCBGdsInyDCyVNLbXyilb61MXGi9N github.com/vmware-labs/yaml-jsonpath v0.3.2/go.mod h1:U6whw1z03QyqgWdgXxvVnQ90zN1BWz5V+51Ewf8k+rQ= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= @@ -513,45 +520,45 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= -go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0= -go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU= -go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ= -go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI= -go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= -go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= -go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg= -go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk= -go.etcd.io/etcd/pkg/v3 v3.5.10 
h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= -go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= -go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= -go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= -go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= -go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= +go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= +go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= +go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= +go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= +go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q= +go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= +go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8= +go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg= +go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE= +go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= +go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M= +go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0= +go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA= +go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw= +go.etcd.io/etcd/server/v3 v3.5.13 h1:V6KG+yMfMSqWt+lGnhFpP5z5dRUj1BDRJ5k1fQ9DFok= +go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ= go.mongodb.org/mongo-driver v1.10.0 h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod 
h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= -go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 h1:hCq2hNMwsegUvPzI7sPOvtO9cqyy5GbWt/Ybp2xrx8Q= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= +go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -577,15 +584,15 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 h1:LoYXNGAShUG3m/ehNk4iFctuhGX/+R1ZpfJ4/ia80JM= -golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= +golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6 h1:1wqE9dj9NpSm04INVsJhhEUzhuDVjbcyKH91sVyPATw= +golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -604,8 +611,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -631,16 +638,16 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 
v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -651,8 +658,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -688,8 +695,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -697,8 +704,8 @@ golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ 
-709,14 +716,14 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -744,8 +751,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -775,18 +782,18 @@ google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 
h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f h1:cUMEy+8oS78BWIH9OWazBkzbr090Od9tWBNtZHkOhf0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -797,8 +804,8 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -806,6 +813,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= 
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -836,62 +845,62 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= -k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI= -k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI= -k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE= -k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= +k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg= -k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/apiserver v0.30.2 h1:ACouHiYl1yFI2VFI3YGM+lvxgy6ir4yK2oLOsLI1/tw= -k8s.io/apiserver v0.30.2/go.mod h1:BOTdFBIch9Sv0ypSEcUR6ew/NUFGocRFNl72Ra7wTm8= -k8s.io/cli-runtime v0.30.2 h1:ooM40eEJusbgHNEqnHziN9ZpLN5U4WcQGsdLKVxpkKE= -k8s.io/cli-runtime v0.30.2/go.mod h1:Y4g/2XezFyTATQUbvV5WaChoUGhojv/jZAtdp5Zkm0A= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apiserver v0.31.1 h1:Sars5ejQDCRBY5f7R3QFHdqN3s61nhkpaX8/k1iEw1c= +k8s.io/apiserver v0.31.1/go.mod h1:lzDhpeToamVZJmmFlaLwdYZwd7zB+WYRYIboqA1kGxM= +k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk= +k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= -k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50= -k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII= -k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE= -k8s.io/component-helpers v0.30.2 h1:kDMYLiWEYeWU7H6jBI+Ua1i2hqNh0DzqDHNIppFC3po= -k8s.io/component-helpers v0.30.2/go.mod h1:tI0anfS6AbRqooaICkGg7UVAQLedOauVSQW9srDBnJw= +k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= +k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= +k8s.io/component-helpers v0.31.1 h1:5hZUf3747atdgtR3gPntrG35rC2CkK7rYq2KUraz6Os= +k8s.io/component-helpers v0.31.1/go.mod h1:ye0Gi8KzFNTfpIuzvVDtxJQMP/0Owkukf1vGf22Hl6U= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= 
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.30.2 h1:VSZILO/tkzrz5Tu2j+yFQZ2Dc5JerQZX2GqhFJbQrfw= -k8s.io/kms v0.30.2/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4= -k8s.io/kube-aggregator v0.30.2 h1:0+yk/ED6foCprY8VmkDPUhngjaAPKsNTXB/UrtvbIz0= -k8s.io/kube-aggregator v0.30.2/go.mod h1:EhqCfDdxysNWXk1wRL9SEHAdo1DKl6EULQagztkBcXE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kms v0.31.1 h1:cGLyV3cIwb0ovpP/jtyIe2mEuQ/MkbhmeBF2IYCA9Io= +k8s.io/kms v0.31.1/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94= +k8s.io/kube-aggregator v0.31.1 h1:vrYBTTs3xMrpiEsmBjsLETZE9uuX67oQ8B3i1BFfMPw= +k8s.io/kube-aggregator v0.31.1/go.mod h1:+aW4NX50uneozN+BtoCxI4g7ND922p8Wy3tWKFDiWVk= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a h1:zD1uj3Jf+mD4zmA7W+goE5TxDkI7OGJjBNBzq5fJtLA= -k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= -k8s.io/kubectl v0.30.2 h1:cgKNIvsOiufgcs4yjvgkK0+aPCfa8pUwzXdJtkbhsH8= -k8s.io/kubectl v0.30.2/go.mod h1:rz7GHXaxwnigrqob0lJsiA07Df8RE3n1TSaC2CTeuB4= -k8s.io/kubelet v0.30.2 h1:Ck4E/pHndI20IzDXxS57dElhDGASPO5pzXF7BcKfmCY= -k8s.io/kubelet v0.30.2/go.mod h1:DSwwTbLQmdNkebAU7ypIALR4P9aXZNFwgRmedojUE94= -k8s.io/metrics v0.30.2 h1:zj4kIPTCfEbY0RHEogpA7QtlItU7xaO11+Gz1zVDxlc= -k8s.io/metrics v0.30.2/go.mod h1:GpoO5XTy/g8CclVLtgA5WTrr2Cy5vCsqr5Xa/0ETWIk= -k8s.io/pod-security-admission v0.30.2 h1:UlHnkvvOr+rgQplOqD+SHzLUF8EgKIOCpDU8kaMeTQQ= -k8s.io/pod-security-admission v0.30.2/go.mod h1:gMUJUG9zOgNBk0VIz5BS7uIYiYPEoXkBSeHh6rG2m8c= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= +k8s.io/kubectl v0.31.1 h1:ih4JQJHxsEggFqDJEHSOdJ69ZxZftgeZvYo7M/cpp24= +k8s.io/kubectl v0.31.1/go.mod h1:aNuQoR43W6MLAtXQ/Bu4GDmoHlbhHKuyD49lmTC8eJM= +k8s.io/kubelet v0.31.1 h1:aAxwVxGzbbMKKk/FnSjvkN52K3LdHhjhzmYcyGBuE0c= +k8s.io/kubelet v0.31.1/go.mod h1:8ZbexYHqUO946gXEfFmnMZiK2UKRGhk7LlGvJ71p2Ig= +k8s.io/metrics v0.31.1 h1:h4I4dakgh/zKflWYAOQhwf0EXaqy8LxAIyE/GBvxqRc= +k8s.io/metrics v0.31.1/go.mod h1:JuH1S9tJiH9q1VCY0yzSCawi7kzNLsDzlWDJN4xR+iA= +k8s.io/pod-security-admission v0.31.1 h1:j++ISpfQU0mWpKhoS4tY06Wm5EKdn65teL4lPJhEMIM= +k8s.io/pod-security-admission v0.31.1/go.mod h1:0aE5T6MGm/50Nr/diBrC6+wwpxsT2E7NECe+TepUuEg= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= mvdan.cc/sh/v3 v3.6.0 
h1:gtva4EXJ0dFNvl5bHjcUEvws+KRcDslT8VKheTYkbGU= mvdan.cc/sh/v3 v3.6.0/go.mod h1:U4mhtBLZ32iWhif5/lD+ygy1zrgaQhUu+XFy7C8+TTA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= -sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= -sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= +sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= +sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= +sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= +sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= diff --git a/pkg/controllers/register.go b/pkg/controllers/register.go index 2b81d7da3d..3005075c84 100644 --- a/pkg/controllers/register.go +++ b/pkg/controllers/register.go @@ -156,14 +156,16 @@ func registerServiceSyncControllers(ctx *synccontext.ControllerContext) error { globalLocalManager.GetCache().WaitForCacheSync(ctx) // register controller + name := "map-host-service-syncer" controller := &servicesync.ServiceSyncer{ - SyncContext: ctx.ToRegisterContext().ToSyncContext("map-host-service-syncer"), + Name: name, + SyncContext: ctx.ToRegisterContext().ToSyncContext(name), SyncServices: mapping, CreateNamespace: true, CreateEndpoints: true, From: globalLocalManager, To: ctx.VirtualManager, - Log: loghelper.New("map-host-service-syncer"), + Log: loghelper.New(name), } err = controller.Register() if err != nil { @@ -176,14 +178,15 @@ func registerServiceSyncControllers(ctx *synccontext.ControllerContext) error { if err != nil { return errors.Wrap(err, "parse physical service mapping") } - + name := "map-virtual-service-syncer" controller := &servicesync.ServiceSyncer{ - SyncContext: ctx.ToRegisterContext().ToSyncContext("map-virtual-service-syncer"), + Name: name, + 
SyncContext: ctx.ToRegisterContext().ToSyncContext(name), SyncServices: mapping, IsVirtualToHostSyncer: true, From: ctx.VirtualManager, To: ctx.LocalManager, - Log: loghelper.New("map-virtual-service-syncer"), + Log: loghelper.New(name), } if ctx.Config.Experimental.MultiNamespaceMode.Enabled { diff --git a/pkg/controllers/resources/configmaps/syncer.go b/pkg/controllers/resources/configmaps/syncer.go index e3c5319c19..183e937fa0 100644 --- a/pkg/controllers/resources/configmaps/syncer.go +++ b/pkg/controllers/resources/configmaps/syncer.go @@ -54,7 +54,7 @@ func (s *configMapSyncer) Syncer() syncertypes.Sync[client.Object] { var _ syncertypes.ControllerModifier = &configMapSyncer{} func (s *configMapSyncer) ModifyController(ctx *synccontext.RegisterContext, builder *builder.Builder) (*builder.Builder, error) { - return builder.WatchesRawSource(ctx.Mappings.Store().Watch(s.GroupVersionKind(), func(nameMapping synccontext.NameMapping, queue workqueue.RateLimitingInterface) { + return builder.WatchesRawSource(ctx.Mappings.Store().Watch(s.GroupVersionKind(), func(nameMapping synccontext.NameMapping, queue workqueue.TypedRateLimitingInterface[ctrl.Request]) { queue.Add(reconcile.Request{NamespacedName: nameMapping.VirtualName}) })), nil } diff --git a/pkg/controllers/resources/csistoragecapacities/syncer.go b/pkg/controllers/resources/csistoragecapacities/syncer.go index 4839c89ac8..de387f8372 100644 --- a/pkg/controllers/resources/csistoragecapacities/syncer.go +++ b/pkg/controllers/resources/csistoragecapacities/syncer.go @@ -117,26 +117,26 @@ func (s *csistoragecapacitySyncer) ModifyController(ctx *synccontext.RegisterCon syncContext := ctx.ToSyncContext("csi storage capacity syncer") return builder.WatchesRawSource(source.Kind(allNSCache, s.Resource(), &handler.Funcs{ - CreateFunc: func(_ context.Context, ce event.CreateEvent, rli workqueue.RateLimitingInterface) { + CreateFunc: func(_ context.Context, ce event.TypedCreateEvent[client.Object], rli workqueue.TypedRateLimitingInterface[ctrl.Request]) { obj := ce.Object s.enqueuePhysical(syncContext, obj, rli) }, - UpdateFunc: func(_ context.Context, ue event.UpdateEvent, rli workqueue.RateLimitingInterface) { + UpdateFunc: func(_ context.Context, ue event.TypedUpdateEvent[client.Object], rli workqueue.TypedRateLimitingInterface[ctrl.Request]) { obj := ue.ObjectNew s.enqueuePhysical(syncContext, obj, rli) }, - DeleteFunc: func(_ context.Context, de event.DeleteEvent, rli workqueue.RateLimitingInterface) { + DeleteFunc: func(_ context.Context, de event.TypedDeleteEvent[client.Object], rli workqueue.TypedRateLimitingInterface[ctrl.Request]) { obj := de.Object s.enqueuePhysical(syncContext, obj, rli) }, - GenericFunc: func(_ context.Context, ge event.GenericEvent, rli workqueue.RateLimitingInterface) { + GenericFunc: func(_ context.Context, ge event.TypedGenericEvent[client.Object], rli workqueue.TypedRateLimitingInterface[ctrl.Request]) { obj := ge.Object s.enqueuePhysical(syncContext, obj, rli) }, })), nil } -func (s *csistoragecapacitySyncer) enqueuePhysical(ctx *synccontext.SyncContext, obj client.Object, q workqueue.RateLimitingInterface) { +func (s *csistoragecapacitySyncer) enqueuePhysical(ctx *synccontext.SyncContext, obj client.Object, q workqueue.TypedRateLimitingInterface[ctrl.Request]) { if obj == nil { return } diff --git a/pkg/controllers/resources/nodes/fake_syncer.go b/pkg/controllers/resources/nodes/fake_syncer.go index 5298bcd469..033eff4016 100644 --- a/pkg/controllers/resources/nodes/fake_syncer.go +++ 
b/pkg/controllers/resources/nodes/fake_syncer.go @@ -240,7 +240,7 @@ func createFakeNode( BootID: newGUID(), ContainerRuntimeVersion: "docker://19.3.12", KernelVersion: "4.19.76-fakelinux", - KubeProxyVersion: FakeNodesVersion, + KubeProxyVersion: FakeNodesVersion, //nolint:staticcheck //deprecated, but we should continue to use it until the api removes it KubeletVersion: FakeNodesVersion, MachineID: newGUID(), SystemUUID: newGUID(), diff --git a/pkg/controllers/resources/nodes/fake_syncer_test.go b/pkg/controllers/resources/nodes/fake_syncer_test.go index c2be138e90..8c56724a23 100644 --- a/pkg/controllers/resources/nodes/fake_syncer_test.go +++ b/pkg/controllers/resources/nodes/fake_syncer_test.go @@ -133,7 +133,7 @@ func TestFakeSync(t *testing.T) { Architecture: "amd64", ContainerRuntimeVersion: "docker://19.3.12", KernelVersion: "4.19.76-fakelinux", - KubeProxyVersion: "v1.16.6-beta.0", + KubeProxyVersion: "v1.16.6-beta.0", //nolint:staticcheck //deprecated, but we should continue to use it until the api removes it KubeletVersion: "v1.16.6-beta.0", OperatingSystem: "linux", OSImage: "Fake Kubernetes Image", @@ -169,7 +169,7 @@ func TestFakeSync(t *testing.T) { node.Status.NodeInfo.MachineID = fakeGUID node.Status.NodeInfo.SystemUUID = fakeGUID node.Status.NodeInfo.KernelVersion = fakeGUID - node.Status.NodeInfo.KubeProxyVersion = fakeGUID + node.Status.NodeInfo.KubeProxyVersion = fakeGUID //nolint:staticcheck //deprecated, but we should continue to use it until the api removes it node.Status.NodeInfo.KubeletVersion = fakeGUID } diff --git a/pkg/controllers/resources/nodes/syncer.go b/pkg/controllers/resources/nodes/syncer.go index dcba41b561..3780941a39 100644 --- a/pkg/controllers/resources/nodes/syncer.go +++ b/pkg/controllers/resources/nodes/syncer.go @@ -154,17 +154,17 @@ func (s *nodeSyncer) ModifyController(ctx *synccontext.RegisterContext, bld *bui // the syncer is configured to update virtual node's .status.allocatable fields by summing the consumption of these pods bld.WatchesRawSource( source.Kind(podCache, &corev1.Pod{}, - handler.TypedFuncs[*corev1.Pod]{ - GenericFunc: func(_ context.Context, ev event.TypedGenericEvent[*corev1.Pod], q workqueue.RateLimitingInterface) { + handler.TypedFuncs[*corev1.Pod, ctrl.Request]{ + GenericFunc: func(_ context.Context, ev event.TypedGenericEvent[*corev1.Pod], q workqueue.TypedRateLimitingInterface[ctrl.Request]) { enqueueNonVClusterPod(nil, ev.Object, q) }, - CreateFunc: func(_ context.Context, ev event.TypedCreateEvent[*corev1.Pod], q workqueue.RateLimitingInterface) { + CreateFunc: func(_ context.Context, ev event.TypedCreateEvent[*corev1.Pod], q workqueue.TypedRateLimitingInterface[ctrl.Request]) { enqueueNonVClusterPod(nil, ev.Object, q) }, - UpdateFunc: func(_ context.Context, ue event.TypedUpdateEvent[*corev1.Pod], q workqueue.RateLimitingInterface) { + UpdateFunc: func(_ context.Context, ue event.TypedUpdateEvent[*corev1.Pod], q workqueue.TypedRateLimitingInterface[ctrl.Request]) { enqueueNonVClusterPod(ue.ObjectOld, ue.ObjectNew, q) }, - DeleteFunc: func(_ context.Context, ev event.TypedDeleteEvent[*corev1.Pod], q workqueue.RateLimitingInterface) { + DeleteFunc: func(_ context.Context, ev event.TypedDeleteEvent[*corev1.Pod], q workqueue.TypedRateLimitingInterface[ctrl.Request]) { enqueueNonVClusterPod(nil, ev.Object, q) }, }), @@ -174,7 +174,7 @@ func (s *nodeSyncer) ModifyController(ctx *synccontext.RegisterContext, bld *bui } // only used when scheduler is enabled -func enqueueNonVClusterPod(old, new client.Object, q 
workqueue.RateLimitingInterface) { +func enqueueNonVClusterPod(old, new client.Object, q workqueue.TypedRateLimitingInterface[ctrl.Request]) { pod, ok := new.(*corev1.Pod) if !ok { klog.Errorf("invalid type passed to pod handler: %T", new) diff --git a/pkg/controllers/resources/pods/syncer.go b/pkg/controllers/resources/pods/syncer.go index 222c85d6cf..12a1a1fa45 100644 --- a/pkg/controllers/resources/pods/syncer.go +++ b/pkg/controllers/resources/pods/syncer.go @@ -126,7 +126,7 @@ var _ syncertypes.ControllerModifier = &podSyncer{} func (s *podSyncer) ModifyController(registerContext *synccontext.RegisterContext, builder *builder.Builder) (*builder.Builder, error) { eventHandler := handler.Funcs{ - UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[ctrl.Request]) { // no need to reconcile pods if namespace labels didn't change if reflect.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels()) { return diff --git a/pkg/controllers/resources/secrets/syncer.go b/pkg/controllers/resources/secrets/syncer.go index 8d4ee327c3..87e23bd9d4 100644 --- a/pkg/controllers/resources/secrets/syncer.go +++ b/pkg/controllers/resources/secrets/syncer.go @@ -58,7 +58,7 @@ func (s *secretSyncer) Syncer() syncertypes.Sync[client.Object] { var _ syncertypes.ControllerModifier = &secretSyncer{} func (s *secretSyncer) ModifyController(ctx *synccontext.RegisterContext, builder *builder.Builder) (*builder.Builder, error) { - return builder.WatchesRawSource(ctx.Mappings.Store().Watch(s.GroupVersionKind(), func(nameMapping synccontext.NameMapping, queue workqueue.RateLimitingInterface) { + return builder.WatchesRawSource(ctx.Mappings.Store().Watch(s.GroupVersionKind(), func(nameMapping synccontext.NameMapping, queue workqueue.TypedRateLimitingInterface[ctrl.Request]) { queue.Add(reconcile.Request{NamespacedName: nameMapping.VirtualName}) })), nil } diff --git a/pkg/controllers/servicesync/servicesync.go b/pkg/controllers/servicesync/servicesync.go index 96c2f5bec6..f682007a6a 100644 --- a/pkg/controllers/servicesync/servicesync.go +++ b/pkg/controllers/servicesync/servicesync.go @@ -2,6 +2,7 @@ package servicesync import ( "context" + "fmt" "strings" "github.com/loft-sh/vcluster/pkg/constants" @@ -22,6 +23,7 @@ import ( ) type ServiceSyncer struct { + Name string SyncContext *synccontext.SyncContext SyncServices map[string]types.NamespacedName @@ -50,7 +52,7 @@ func (e *ServiceSyncer) Register() error { WithOptions(controller.Options{ CacheSyncTimeout: constants.DefaultCacheSyncTimeout, }). - Named("servicesync"). + Named(fmt.Sprintf("servicesyncer-%s", e.Name)). For(&corev1.Service{}). 
WatchesRawSource(source.Kind(e.To.GetCache(), &corev1.Service{}, handler.TypedEnqueueRequestsFromMapFunc(func(_ context.Context, object *corev1.Service) []reconcile.Request { if object == nil { diff --git a/pkg/mappings/store/watcher.go b/pkg/mappings/store/watcher.go index b03414b2b2..6d463ce5df 100644 --- a/pkg/mappings/store/watcher.go +++ b/pkg/mappings/store/watcher.go @@ -6,13 +6,14 @@ import ( "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" ) type watcher struct { m sync.Mutex addQueueFn synccontext.AddQueueFunc - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[ctrl.Request] } func (w *watcher) Dispatch(nameMapping synccontext.NameMapping) { @@ -26,7 +27,7 @@ func (w *watcher) Dispatch(nameMapping synccontext.NameMapping) { w.addQueueFn(nameMapping, w.queue) } -func (w *watcher) Start(_ context.Context, queue workqueue.RateLimitingInterface) error { +func (w *watcher) Start(_ context.Context, queue workqueue.TypedRateLimitingInterface[ctrl.Request]) error { w.m.Lock() defer w.m.Unlock() diff --git a/pkg/syncer/handler.go b/pkg/syncer/handler.go index dda5a27a02..566b731efe 100644 --- a/pkg/syncer/handler.go +++ b/pkg/syncer/handler.go @@ -4,12 +4,13 @@ import ( "context" "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" ) -type enqueueFunc func(ctx context.Context, obj client.Object, q workqueue.RateLimitingInterface, isDelete bool) +type enqueueFunc func(ctx context.Context, obj client.Object, q workqueue.TypedRateLimitingInterface[ctrl.Request], isDelete bool) func newEventHandler(enqueue enqueueFunc) handler.EventHandler { return &eventHandler{enqueue: enqueue} @@ -20,22 +21,22 @@ type eventHandler struct { } // Create is called in response to an create event - e.g. Pod Creation. -func (r *eventHandler) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (r *eventHandler) Create(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[ctrl.Request]) { r.enqueue(ctx, evt.Object, q, false) } // Update is called in response to an update event - e.g. Pod Updated. -func (r *eventHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (r *eventHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[ctrl.Request]) { r.enqueue(ctx, evt.ObjectNew, q, false) } // Delete is called in response to a delete event - e.g. Pod Deleted. -func (r *eventHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (r *eventHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[ctrl.Request]) { r.enqueue(ctx, evt.Object, q, true) } // Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or // external trigger request - e.g. reconcile Autoscaling, or a Webhook. 
-func (r *eventHandler) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (r *eventHandler) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[ctrl.Request]) { r.enqueue(ctx, evt.Object, q, false) } diff --git a/pkg/syncer/synccontext/mapper.go b/pkg/syncer/synccontext/mapper.go index 84904e3356..1179392fa7 100644 --- a/pkg/syncer/synccontext/mapper.go +++ b/pkg/syncer/synccontext/mapper.go @@ -9,6 +9,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/source" @@ -32,7 +33,7 @@ type MappingsRegistry interface { Store() MappingsStore } -type AddQueueFunc func(nameMapping NameMapping, queue workqueue.RateLimitingInterface) +type AddQueueFunc func(nameMapping NameMapping, queue workqueue.TypedRateLimitingInterface[ctrl.Request]) // MappingsStore holds logic to store and retrieve mappings type MappingsStore interface { diff --git a/pkg/syncer/syncer.go b/pkg/syncer/syncer.go index 1b8d36b310..0a69d48f3b 100644 --- a/pkg/syncer/syncer.go +++ b/pkg/syncer/syncer.go @@ -398,7 +398,7 @@ func (r *SyncController) extractRequest(ctx *synccontext.SyncContext, req ctrl.R return req, pReq, nil } -func (r *SyncController) enqueueVirtual(_ context.Context, obj client.Object, q workqueue.RateLimitingInterface, isDelete bool) { +func (r *SyncController) enqueueVirtual(_ context.Context, obj client.Object, q workqueue.TypedRateLimitingInterface[ctrl.Request], isDelete bool) { if obj == nil { return } @@ -425,7 +425,7 @@ func (r *SyncController) enqueueVirtual(_ context.Context, obj client.Object, q }) } -func (r *SyncController) enqueuePhysical(ctx context.Context, obj client.Object, q workqueue.RateLimitingInterface, isDelete bool) { +func (r *SyncController) enqueuePhysical(ctx context.Context, obj client.Object, q workqueue.TypedRateLimitingInterface[ctrl.Request], isDelete bool) { if obj == nil { return } diff --git a/pkg/syncer/syncer_test.go b/pkg/syncer/syncer_test.go index b21473315b..f18421f823 100644 --- a/pkg/syncer/syncer_test.go +++ b/pkg/syncer/syncer_test.go @@ -98,7 +98,7 @@ var ( type fakeSource struct { m sync.Mutex - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[ctrl.Request] } func (f *fakeSource) String() string { @@ -112,7 +112,7 @@ func (f *fakeSource) Add(request reconcile.Request) { f.queue.Add(request) } -func (f *fakeSource) Start(_ context.Context, queue workqueue.RateLimitingInterface) error { +func (f *fakeSource) Start(_ context.Context, queue workqueue.TypedRateLimitingInterface[ctrl.Request]) error { f.m.Lock() defer f.m.Unlock() diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE deleted file mode 100644 index 52cf18e425..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright 2021 The ANTLR Project - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. 
Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - 3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go deleted file mode 100644 index ab51212676..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Package antlr implements the Go version of the ANTLR 4 runtime. - -# The ANTLR Tool - -ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, -or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. -From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface -(or visitor) that makes it easy to respond to the recognition of phrases of interest. - -# Code Generation - -ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a -runtime library, written specifically to support the generated code in the target language. This library is the -runtime for the Go target. - -To generate code for the go target, it is generally recommended to place the source grammar files in a package of -their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory -it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean -that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other -way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in -your IDE, or configuration in your CI system. - -Here is a general template for an ANTLR based recognizer in Go: - - . - ├── myproject - ├── parser - │ ├── mygrammar.g4 - │ ├── antlr-4.12.0-complete.jar - │ ├── error_listeners.go - │ ├── generate.go - │ ├── generate.sh - ├── go.mod - ├── go.sum - ├── main.go - └── main_test.go - -Make sure that the package statement in your grammar file(s) reflects the go package they exist in. 
-The generate.go file then looks like this: - - package parser - - //go:generate ./generate.sh - -And the generate.sh file will look similar to this: - - #!/bin/sh - - alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool' - antlr4 -Dlanguage=Go -no-visitor -package parser *.g4 - -depending on whether you want visitors or listeners or any other ANTLR options. - -From the command line at the root of your package “myproject” you can then simply issue the command: - - go generate ./... - -# Copyright Notice - -Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - -Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. - -[target languages]: https://github.com/antlr/antlr4/tree/master/runtime -[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt -*/ -package antlr diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go deleted file mode 100644 index 7619fa172e..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic -// context). The syntactic context is a graph-structured stack node whose -// path(s) to the root is the rule invocation(s) chain used to arrive at the -// state. The semantic context is the tree of semantic predicates encountered -// before reaching an ATN state. -type ATNConfig interface { - Equals(o Collectable[ATNConfig]) bool - Hash() int - - GetState() ATNState - GetAlt() int - GetSemanticContext() SemanticContext - - GetContext() PredictionContext - SetContext(PredictionContext) - - GetReachesIntoOuterContext() int - SetReachesIntoOuterContext(int) - - String() string - - getPrecedenceFilterSuppressed() bool - setPrecedenceFilterSuppressed(bool) -} - -type BaseATNConfig struct { - precedenceFilterSuppressed bool - state ATNState - alt int - context PredictionContext - semanticContext SemanticContext - reachesIntoOuterContext int -} - -func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup - return &BaseATNConfig{ - state: old.state, - alt: old.alt, - context: old.context, - semanticContext: old.semanticContext, - reachesIntoOuterContext: old.reachesIntoOuterContext, - } -} - -func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig5(state, alt, context, SemanticContextNone) -} - -func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") // TODO: Necessary? 
- } - - return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} -} - -func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) -} - -func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) -} - -func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) -} - -func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) -} - -func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") - } - - return &BaseATNConfig{ - state: state, - alt: c.GetAlt(), - context: context, - semanticContext: semanticContext, - reachesIntoOuterContext: c.GetReachesIntoOuterContext(), - precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), - } -} - -func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { - return b.precedenceFilterSuppressed -} - -func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { - b.precedenceFilterSuppressed = v -} - -func (b *BaseATNConfig) GetState() ATNState { - return b.state -} - -func (b *BaseATNConfig) GetAlt() int { - return b.alt -} - -func (b *BaseATNConfig) SetContext(v PredictionContext) { - b.context = v -} -func (b *BaseATNConfig) GetContext() PredictionContext { - return b.context -} - -func (b *BaseATNConfig) GetSemanticContext() SemanticContext { - return b.semanticContext -} - -func (b *BaseATNConfig) GetReachesIntoOuterContext() int { - return b.reachesIntoOuterContext -} - -func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { - b.reachesIntoOuterContext = v -} - -// Equals is the default comparison function for an ATNConfig when no specialist implementation is required -// for a collection. -// -// An ATN configuration is equal to another if both have the same state, they -// predict the same alternative, and syntactic/semantic contexts are the same. 
-func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool { - if b == o { - return true - } else if o == nil { - return false - } - - var other, ok = o.(*BaseATNConfig) - - if !ok { - return false - } - - var equal bool - - if b.context == nil { - equal = other.context == nil - } else { - equal = b.context.Equals(other.context) - } - - var ( - nums = b.state.GetStateNumber() == other.state.GetStateNumber() - alts = b.alt == other.alt - cons = b.semanticContext.Equals(other.semanticContext) - sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed - ) - - return nums && alts && cons && sups && equal -} - -// Hash is the default hash function for BaseATNConfig, when no specialist hash function -// is required for a collection -func (b *BaseATNConfig) Hash() int { - var c int - if b.context != nil { - c = b.context.Hash() - } - - h := murmurInit(7) - h = murmurUpdate(h, b.state.GetStateNumber()) - h = murmurUpdate(h, b.alt) - h = murmurUpdate(h, c) - h = murmurUpdate(h, b.semanticContext.Hash()) - return murmurFinish(h, 4) -} - -func (b *BaseATNConfig) String() string { - var s1, s2, s3 string - - if b.context != nil { - s1 = ",[" + fmt.Sprint(b.context) + "]" - } - - if b.semanticContext != SemanticContextNone { - s2 = "," + fmt.Sprint(b.semanticContext) - } - - if b.reachesIntoOuterContext > 0 { - s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) - } - - return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) -} - -type LexerATNConfig struct { - *BaseATNConfig - lexerActionExecutor *LexerActionExecutor - passedThroughNonGreedyDecision bool -} - -func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), - lexerActionExecutor: lexerActionExecutor, - } -} - -func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via -// the default comparator [ObjEqComparator]. 
-func (l *LexerATNConfig) Hash() int { - var f int - if l.passedThroughNonGreedyDecision { - f = 1 - } else { - f = 0 - } - h := murmurInit(7) - h = murmurUpdate(h, l.state.GetStateNumber()) - h = murmurUpdate(h, l.alt) - h = murmurUpdate(h, l.context.Hash()) - h = murmurUpdate(h, l.semanticContext.Hash()) - h = murmurUpdate(h, f) - h = murmurUpdate(h, l.lexerActionExecutor.Hash()) - h = murmurFinish(h, 6) - return h -} - -// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via -// the default comparator [ObjEqComparator]. -func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool { - if l == other { - return true - } - var othert, ok = other.(*LexerATNConfig) - - if l == other { - return true - } else if !ok { - return false - } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { - return false - } - - var b bool - - if l.lexerActionExecutor != nil { - b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor) - } else { - b = othert.lexerActionExecutor != nil - } - - if b { - return false - } - - return l.BaseATNConfig.Equals(othert.BaseATNConfig) -} - -func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { - var ds, ok = target.(DecisionState) - - return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go deleted file mode 100644 index 43e9b33f3b..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -type ATNConfigSet interface { - Hash() int - Equals(o Collectable[ATNConfig]) bool - Add(ATNConfig, *DoubleDict) bool - AddAll([]ATNConfig) bool - - GetStates() *JStore[ATNState, Comparator[ATNState]] - GetPredicates() []SemanticContext - GetItems() []ATNConfig - - OptimizeConfigs(interpreter *BaseATNSimulator) - - Length() int - IsEmpty() bool - Contains(ATNConfig) bool - ContainsFast(ATNConfig) bool - Clear() - String() string - - HasSemanticContext() bool - SetHasSemanticContext(v bool) - - ReadOnly() bool - SetReadOnly(bool) - - GetConflictingAlts() *BitSet - SetConflictingAlts(*BitSet) - - Alts() *BitSet - - FullContext() bool - - GetUniqueAlt() int - SetUniqueAlt(int) - - GetDipsIntoOuterContext() bool - SetDipsIntoOuterContext(bool) -} - -// BaseATNConfigSet is a specialized set of ATNConfig that tracks information -// about its elements and can combine similar configurations using a -// graph-structured stack. -type BaseATNConfigSet struct { - cachedHash int - - // configLookup is used to determine whether two BaseATNConfigSets are equal. We - // need all configurations with the same (s, i, _, semctx) to be equal. A key - // effectively doubles the number of objects associated with ATNConfigs. All - // keys are hashed by (s, i, _, pi), not including the context. Wiped out when - // read-only because a set becomes a DFA state. - configLookup *JStore[ATNConfig, Comparator[ATNConfig]] - - // configs is the added elements. - configs []ATNConfig - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. 
Can we track conflicts as they - // are added to save scanning configs later? - conflictingAlts *BitSet - - // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates - // we hit a pred while computing a closure operation. Do not make a DFA state - // from the BaseATNConfigSet in this case. TODO: How is this used by parsers? - dipsIntoOuterContext bool - - // fullCtx is whether it is part of a full context LL prediction. Used to - // determine how to merge $. It is a wildcard with SLL, but not for an LL - // context merge. - fullCtx bool - - // Used in parser and lexer. In lexer, it indicates we hit a pred - // while computing a closure operation. Don't make a DFA state from a. - hasSemanticContext bool - - // readOnly is whether it is read-only. Do not - // allow any code to manipulate the set if true because DFA states will point at - // sets and those must not change. It not, protect other fields; conflictingAlts - // in particular, which is assigned after readOnly. - readOnly bool - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? - uniqueAlt int -} - -func (b *BaseATNConfigSet) Alts() *BitSet { - alts := NewBitSet() - for _, it := range b.configs { - alts.add(it.GetAlt()) - } - return alts -} - -func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet { - return &BaseATNConfigSet{ - cachedHash: -1, - configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst), - fullCtx: fullCtx, - } -} - -// Add merges contexts with existing configs for (s, i, pi, _), where s is the -// ATNConfig.state, i is the ATNConfig.alt, and pi is the -// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates -// dipsIntoOuterContext and hasSemanticContext when necessary. -func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { - if b.readOnly { - panic("set is read-only") - } - - if config.GetSemanticContext() != SemanticContextNone { - b.hasSemanticContext = true - } - - if config.GetReachesIntoOuterContext() > 0 { - b.dipsIntoOuterContext = true - } - - existing, present := b.configLookup.Put(config) - - // The config was not already in the set - // - if !present { - b.cachedHash = -1 - b.configs = append(b.configs, config) // Track order here - return true - } - - // Merge a previous (s, i, pi, _) with it and save the result - rootIsWildcard := !b.fullCtx - merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) - - // No need to check for existing.context because config.context is in the cache, - // since the only way to create new graphs is the "call rule" and here. We cache - // at both places. 
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) - - // Preserve the precedence filter suppression during the merge - if config.getPrecedenceFilterSuppressed() { - existing.setPrecedenceFilterSuppressed(true) - } - - // Replace the context because there is no need to do alt mapping - existing.SetContext(merged) - - return true -} - -func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] { - - // states uses the standard comparator provided by the ATNState instance - // - states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst) - - for i := 0; i < len(b.configs); i++ { - states.Put(b.configs[i].GetState()) - } - - return states -} - -func (b *BaseATNConfigSet) HasSemanticContext() bool { - return b.hasSemanticContext -} - -func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) { - b.hasSemanticContext = v -} - -func (b *BaseATNConfigSet) GetPredicates() []SemanticContext { - preds := make([]SemanticContext, 0) - - for i := 0; i < len(b.configs); i++ { - c := b.configs[i].GetSemanticContext() - - if c != SemanticContextNone { - preds = append(preds, c) - } - } - - return preds -} - -func (b *BaseATNConfigSet) GetItems() []ATNConfig { - return b.configs -} - -func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { - if b.readOnly { - panic("set is read-only") - } - - if b.configLookup.Len() == 0 { - return - } - - for i := 0; i < len(b.configs); i++ { - config := b.configs[i] - - config.SetContext(interpreter.getCachedContext(config.GetContext())) - } -} - -func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { - for i := 0; i < len(coll); i++ { - b.Add(coll[i], nil) - } - - return false -} - -// Compare is a hack function just to verify that adding DFAstares to the known -// set works, so long as comparison of ATNConfigSet s works. For that to work, we -// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't -// know the order, so we do this inefficient hack. If this proves the point, then -// we can change the config set to a better structure. 
-func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool { - if len(b.configs) != len(bs.configs) { - return false - } - - for _, c := range b.configs { - found := false - for _, c2 := range bs.configs { - if c.Equals(c2) { - found = true - break - } - } - - if !found { - return false - } - - } - return true -} - -func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool { - if b == other { - return true - } else if _, ok := other.(*BaseATNConfigSet); !ok { - return false - } - - other2 := other.(*BaseATNConfigSet) - - return b.configs != nil && - b.fullCtx == other2.fullCtx && - b.uniqueAlt == other2.uniqueAlt && - b.conflictingAlts == other2.conflictingAlts && - b.hasSemanticContext == other2.hasSemanticContext && - b.dipsIntoOuterContext == other2.dipsIntoOuterContext && - b.Compare(other2) -} - -func (b *BaseATNConfigSet) Hash() int { - if b.readOnly { - if b.cachedHash == -1 { - b.cachedHash = b.hashCodeConfigs() - } - - return b.cachedHash - } - - return b.hashCodeConfigs() -} - -func (b *BaseATNConfigSet) hashCodeConfigs() int { - h := 1 - for _, config := range b.configs { - h = 31*h + config.Hash() - } - return h -} - -func (b *BaseATNConfigSet) Length() int { - return len(b.configs) -} - -func (b *BaseATNConfigSet) IsEmpty() bool { - return len(b.configs) == 0 -} - -func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.Contains(item) -} - -func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set -} - -func (b *BaseATNConfigSet) Clear() { - if b.readOnly { - panic("set is read-only") - } - - b.configs = make([]ATNConfig, 0) - b.cachedHash = -1 - b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) -} - -func (b *BaseATNConfigSet) FullContext() bool { - return b.fullCtx -} - -func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { - return b.dipsIntoOuterContext -} - -func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { - b.dipsIntoOuterContext = v -} - -func (b *BaseATNConfigSet) GetUniqueAlt() int { - return b.uniqueAlt -} - -func (b *BaseATNConfigSet) SetUniqueAlt(v int) { - b.uniqueAlt = v -} - -func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { - return b.conflictingAlts -} - -func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { - b.conflictingAlts = v -} - -func (b *BaseATNConfigSet) ReadOnly() bool { - return b.readOnly -} - -func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { - b.readOnly = readOnly - - if readOnly { - b.configLookup = nil // Read only, so no need for the lookup cache - } -} - -func (b *BaseATNConfigSet) String() string { - s := "[" - - for i, c := range b.configs { - s += c.String() - - if i != len(b.configs)-1 { - s += ", " - } - } - - s += "]" - - if b.hasSemanticContext { - s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) - } - - if b.uniqueAlt != ATNInvalidAltNumber { - s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) - } - - if b.conflictingAlts != nil { - s += ",conflictingAlts=" + b.conflictingAlts.String() - } - - if b.dipsIntoOuterContext { - s += ",dipsIntoOuterContext" - } - - return s -} - -type OrderedATNConfigSet struct { - *BaseATNConfigSet -} - -func NewOrderedATNConfigSet() *OrderedATNConfigSet { - b := NewBaseATNConfigSet(false) - - // This set uses the standard Hash() and 
Equals() from ATNConfig - b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - - return &OrderedATNConfigSet{BaseATNConfigSet: b} -} - -func hashATNConfig(i interface{}) int { - o := i.(ATNConfig) - hash := 7 - hash = 31*hash + o.GetState().GetStateNumber() - hash = 31*hash + o.GetAlt() - hash = 31*hash + o.GetSemanticContext().Hash() - return hash -} - -func equalATNConfigs(a, b interface{}) bool { - if a == nil || b == nil { - return false - } - - if a == b { - return true - } - - var ai, ok = a.(ATNConfig) - var bi, ok1 = b.(ATNConfig) - - if !ok || !ok1 { - return false - } - - if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() { - return false - } - - if ai.GetAlt() != bi.GetAlt() { - return false - } - - return ai.GetSemanticContext().Equals(bi.GetSemanticContext()) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go deleted file mode 100644 index a8b889cedb..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type InputStream struct { - name string - index int - data []rune - size int -} - -func NewInputStream(data string) *InputStream { - - is := new(InputStream) - - is.name = "" - is.index = 0 - is.data = []rune(data) - is.size = len(is.data) // number of runes - - return is -} - -func (is *InputStream) reset() { - is.index = 0 -} - -func (is *InputStream) Consume() { - if is.index >= is.size { - // assert is.LA(1) == TokenEOF - panic("cannot consume EOF") - } - is.index++ -} - -func (is *InputStream) LA(offset int) int { - - if offset == 0 { - return 0 // nil - } - if offset < 0 { - offset++ // e.g., translate LA(-1) to use offset=0 - } - pos := is.index + offset - 1 - - if pos < 0 || pos >= is.size { // invalid - return TokenEOF - } - - return int(is.data[pos]) -} - -func (is *InputStream) LT(offset int) int { - return is.LA(offset) -} - -func (is *InputStream) Index() int { - return is.index -} - -func (is *InputStream) Size() int { - return is.size -} - -// mark/release do nothing we have entire buffer -func (is *InputStream) Mark() int { - return -1 -} - -func (is *InputStream) Release(marker int) { -} - -func (is *InputStream) Seek(index int) { - if index <= is.index { - is.index = index // just jump don't update stream state (line,...) 
- return - } - // seek forward - is.index = intMin(index, is.size) -} - -func (is *InputStream) GetText(start int, stop int) string { - if stop >= is.size { - stop = is.size - 1 - } - if start >= is.size { - return "" - } - - return string(is.data[start : stop+1]) -} - -func (is *InputStream) GetTextFromTokens(start, stop Token) string { - if start != nil && stop != nil { - return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) - } - - return "" -} - -func (is *InputStream) GetTextFromInterval(i *Interval) string { - return is.GetText(i.Start, i.Stop) -} - -func (*InputStream) GetSourceName() string { - return "Obtained from string" -} - -func (is *InputStream) String() string { - return string(is.data) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go deleted file mode 100644 index e5a74f0c6c..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go +++ /dev/null @@ -1,198 +0,0 @@ -package antlr - -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -import ( - "sort" -) - -// Collectable is an interface that a struct should implement if it is to be -// usable as a key in these collections. -type Collectable[T any] interface { - Hash() int - Equals(other Collectable[T]) bool -} - -type Comparator[T any] interface { - Hash1(o T) int - Equals2(T, T) bool -} - -// JStore implements a container that allows the use of a struct to calculate the key -// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just -// serve the needs of the ANTLR Go runtime. -// -// For ease of porting the logic of the runtime from the master target (Java), this collection -// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() -// function as the key. The values are stored in a standard go map which internally is a form of hashmap -// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with -// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't -// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and -// we understand the requirements, then this is fine - this is not a general purpose collection. -type JStore[T any, C Comparator[T]] struct { - store map[int][]T - len int - comparator Comparator[T] -} - -func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] { - - if comparator == nil { - panic("comparator cannot be nil") - } - - s := &JStore[T, C]{ - store: make(map[int][]T, 1), - comparator: comparator, - } - return s -} - -// Put will store given value in the collection. Note that the key for storage is generated from -// the value itself - this is specifically because that is what ANTLR needs - this would not be useful -// as any kind of general collection. -// -// If the key has a hash conflict, then the value will be added to the slice of values associated with the -// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is -// tested by calling the equals() method on the key. 
-// -// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true -// -// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. -func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn - - kh := s.comparator.Hash1(value) - - for _, v1 := range s.store[kh] { - if s.comparator.Equals2(value, v1) { - return v1, true - } - } - s.store[kh] = append(s.store[kh], value) - s.len++ - return value, false -} - -// Get will return the value associated with the key - the type of the key is the same type as the value -// which would not generally be useful, but this is a specific thing for ANTLR where the key is -// generated using the object we are going to store. -func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn - - kh := s.comparator.Hash1(key) - - for _, v := range s.store[kh] { - if s.comparator.Equals2(key, v) { - return v, true - } - } - return key, false -} - -// Contains returns true if the given key is present in the store -func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn - - _, present := s.Get(key) - return present -} - -func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { - vs := make([]T, 0, len(s.store)) - for _, v := range s.store { - vs = append(vs, v...) - } - sort.Slice(vs, func(i, j int) bool { - return less(vs[i], vs[j]) - }) - - return vs -} - -func (s *JStore[T, C]) Each(f func(T) bool) { - for _, e := range s.store { - for _, v := range e { - f(v) - } - } -} - -func (s *JStore[T, C]) Len() int { - return s.len -} - -func (s *JStore[T, C]) Values() []T { - vs := make([]T, 0, len(s.store)) - for _, e := range s.store { - for _, v := range e { - vs = append(vs, v) - } - } - return vs -} - -type entry[K, V any] struct { - key K - val V -} - -type JMap[K, V any, C Comparator[K]] struct { - store map[int][]*entry[K, V] - len int - comparator Comparator[K] -} - -func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] { - return &JMap[K, V, C]{ - store: make(map[int][]*entry[K, V], 1), - comparator: comparator, - } -} - -func (m *JMap[K, V, C]) Put(key K, val V) { - kh := m.comparator.Hash1(key) - - m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) - m.len++ -} - -func (m *JMap[K, V, C]) Values() []V { - vs := make([]V, 0, len(m.store)) - for _, e := range m.store { - for _, v := range e { - vs = append(vs, v.val) - } - } - return vs -} - -func (m *JMap[K, V, C]) Get(key K) (V, bool) { - - var none V - kh := m.comparator.Hash1(key) - for _, e := range m.store[kh] { - if m.comparator.Equals2(e.key, key) { - return e.val, true - } - } - return none, false -} - -func (m *JMap[K, V, C]) Len() int { - return len(m.store) -} - -func (m *JMap[K, V, C]) Delete(key K) { - kh := m.comparator.Hash1(key) - for i, e := range m.store[kh] { - if m.comparator.Equals2(e.key, key) { - m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) - m.len-- - return - } - } -} - -func (m *JMap[K, V, C]) Clear() { - m.store = make(map[int][]*entry[K, V]) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go deleted file mode 100644 index ba62af3610..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
-// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "golang.org/x/exp/slices" - "strconv" -) - -// Represents {@code $} in local context prediction, which means wildcard. -// {@code//+x =//}. -// / -const ( - BasePredictionContextEmptyReturnState = 0x7FFFFFFF -) - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EmptyReturnState}. -// / - -var ( - BasePredictionContextglobalNodeCount = 1 - BasePredictionContextid = BasePredictionContextglobalNodeCount -) - -type PredictionContext interface { - Hash() int - Equals(interface{}) bool - GetParent(int) PredictionContext - getReturnState(int) int - length() int - isEmpty() bool - hasEmptyPath() bool - String() string -} - -type BasePredictionContext struct { - cachedHash int -} - -func NewBasePredictionContext(cachedHash int) *BasePredictionContext { - pc := new(BasePredictionContext) - pc.cachedHash = cachedHash - - return pc -} - -func (b *BasePredictionContext) isEmpty() bool { - return false -} - -func calculateHash(parent PredictionContext, returnState int) int { - h := murmurInit(1) - h = murmurUpdate(h, parent.Hash()) - h = murmurUpdate(h, returnState) - return murmurFinish(h, 2) -} - -var _emptyPredictionContextHash int - -func init() { - _emptyPredictionContextHash = murmurInit(1) - _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) -} - -func calculateEmptyHash() int { - return _emptyPredictionContextHash -} - -// Used to cache {@link BasePredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - -type PredictionContextCache struct { - cache map[PredictionContext]PredictionContext -} - -func NewPredictionContextCache() *PredictionContextCache { - t := new(PredictionContextCache) - t.cache = make(map[PredictionContext]PredictionContext) - return t -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a Newcontext to the cache. -// Protect shared cache from unsafe thread access. 
-func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { - if ctx == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY - } - existing := p.cache[ctx] - if existing != nil { - return existing - } - p.cache[ctx] = ctx - return ctx -} - -func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { - return p.cache[ctx] -} - -func (p *PredictionContextCache) length() int { - return len(p.cache) -} - -type SingletonPredictionContext interface { - PredictionContext -} - -type BaseSingletonPredictionContext struct { - *BasePredictionContext - - parentCtx PredictionContext - returnState int -} - -func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { - var cachedHash int - if parent != nil { - cachedHash = calculateHash(parent, returnState) - } else { - cachedHash = calculateEmptyHash() - } - - s := new(BaseSingletonPredictionContext) - s.BasePredictionContext = NewBasePredictionContext(cachedHash) - - s.parentCtx = parent - s.returnState = returnState - - return s -} - -func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { - if returnState == BasePredictionContextEmptyReturnState && parent == nil { - // someone can pass in the bits of an array ctx that mean $ - return BasePredictionContextEMPTY - } - - return NewBaseSingletonPredictionContext(parent, returnState) -} - -func (b *BaseSingletonPredictionContext) length() int { - return 1 -} - -func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { - return b.parentCtx -} - -func (b *BaseSingletonPredictionContext) getReturnState(index int) int { - return b.returnState -} - -func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { - return b.returnState == BasePredictionContextEmptyReturnState -} - -func (b *BaseSingletonPredictionContext) Hash() int { - return b.cachedHash -} - -func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool { - if b == other { - return true - } - if _, ok := other.(*BaseSingletonPredictionContext); !ok { - return false - } - - otherP := other.(*BaseSingletonPredictionContext) - - if b.returnState != otherP.getReturnState(0) { - return false - } - if b.parentCtx == nil { - return otherP.parentCtx == nil - } - - return b.parentCtx.Equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) String() string { - var up string - - if b.parentCtx == nil { - up = "" - } else { - up = b.parentCtx.String() - } - - if len(up) == 0 { - if b.returnState == BasePredictionContextEmptyReturnState { - return "$" - } - - return strconv.Itoa(b.returnState) - } - - return strconv.Itoa(b.returnState) + " " + up -} - -var BasePredictionContextEMPTY = NewEmptyPredictionContext() - -type EmptyPredictionContext struct { - *BaseSingletonPredictionContext -} - -func NewEmptyPredictionContext() *EmptyPredictionContext { - - p := new(EmptyPredictionContext) - - p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - p.cachedHash = calculateEmptyHash() - return p -} - -func (e *EmptyPredictionContext) isEmpty() bool { - return true -} - -func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { - return nil -} - -func (e *EmptyPredictionContext) getReturnState(index int) int { - return e.returnState -} - -func (e *EmptyPredictionContext) Hash() int { - return e.cachedHash -} - -func (e *EmptyPredictionContext) Equals(other interface{}) bool { - return 
e == other -} - -func (e *EmptyPredictionContext) String() string { - return "$" -} - -type ArrayPredictionContext struct { - *BasePredictionContext - - parents []PredictionContext - returnStates []int -} - -func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { - // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // nil parent and - // returnState == {@link //EmptyReturnState}. - hash := murmurInit(1) - - for _, parent := range parents { - hash = murmurUpdate(hash, parent.Hash()) - } - - for _, returnState := range returnStates { - hash = murmurUpdate(hash, returnState) - } - - hash = murmurFinish(hash, len(parents)<<1) - - c := new(ArrayPredictionContext) - c.BasePredictionContext = NewBasePredictionContext(hash) - - c.parents = parents - c.returnStates = returnStates - - return c -} - -func (a *ArrayPredictionContext) GetReturnStates() []int { - return a.returnStates -} - -func (a *ArrayPredictionContext) hasEmptyPath() bool { - return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) isEmpty() bool { - // since EmptyReturnState can only appear in the last position, we - // don't need to verify that size==1 - return a.returnStates[0] == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) length() int { - return len(a.returnStates) -} - -func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { - return a.parents[index] -} - -func (a *ArrayPredictionContext) getReturnState(index int) int { - return a.returnStates[index] -} - -// Equals is the default comparison function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Equals(o interface{}) bool { - if a == o { - return true - } - other, ok := o.(*ArrayPredictionContext) - if !ok { - return false - } - if a.cachedHash != other.Hash() { - return false // can't be same if hash is different - } - - // Must compare the actual array elements and not just the array address - // - return slices.Equal(a.returnStates, other.returnStates) && - slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { - return x.Equals(y) - }) -} - -// Hash is the default hash function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Hash() int { - return a.BasePredictionContext.cachedHash -} - -func (a *ArrayPredictionContext) String() string { - if a.isEmpty() { - return "[]" - } - - s := "[" - for i := 0; i < len(a.returnStates); i++ { - if i > 0 { - s = s + ", " - } - if a.returnStates[i] == BasePredictionContextEmptyReturnState { - s = s + "$" - continue - } - s = s + strconv.Itoa(a.returnStates[i]) - if a.parents[i] != nil { - s = s + " " + a.parents[i].String() - } else { - s = s + "nil" - } - } - - return s + "]" -} - -// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or nil. -// / -func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { - if outerContext == nil { - outerContext = ParserRuleContextEmpty - } - // if we are in RuleContext of start rule, s, then BasePredictionContext - // is EMPTY. Nobody called us. 
(if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { - return BasePredictionContextEMPTY - } - // If we have a parent, convert it to a BasePredictionContext graph - parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) - state := a.states[outerContext.GetInvokingState()] - transition := state.GetTransitions()[0] - - return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) -} - -func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - - // Share same graph if both same - // - if a == b || a.Equals(b) { - return a - } - - // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test - // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created - // from it. - // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion - // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from - // either of them. - - ac, ok1 := a.(*BaseSingletonPredictionContext) - bc, ok2 := b.(*BaseSingletonPredictionContext) - - if ok1 && ok2 { - return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if rootIsWildcard { - if _, ok := a.(*EmptyPredictionContext); ok { - return a - } - if _, ok := b.(*EmptyPredictionContext); ok { - return b - } - } - - // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters - // here. - // - // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here - - var arp, arb *ArrayPredictionContext - var ok bool - if arp, ok = a.(*ArrayPredictionContext); ok { - } else if _, ok = a.(*BaseSingletonPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) - } else if _, ok = a.(*EmptyPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - if arb, ok = b.(*ArrayPredictionContext); ok { - } else if _, ok = b.(*BaseSingletonPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) - } else if _, ok = b.(*EmptyPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - // Both arp and arb - return mergeArrays(arp, arb, rootIsWildcard, mergeCache) -} - -// Merge two {@link SingletonBasePredictionContext} instances. -// -//

-// Stack tops equal, parents merge is same return left graph.
-//
-// Same stack top, parents differ merge parents giving array node, then
-// remainders of those graphs. A Newroot node is created to point to the
-// merged parents.
-//
-// Different stack tops pointing to same parent. Make array node for the
-// root where both element in the root point to the same (original)
-// parent.
-//
-// Different stack tops pointing to different parents. Make array node for
-// the root where each element points to the corresponding original
-// parent.
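// A minimal standalone sketch of the singleton-merge outcomes described above,
// using a simplified stand-in type rather than the runtime's PredictionContext;
// all names here are illustrative only and not part of the vendored API.
package main

import "fmt"

type node struct {
	parent      *node
	returnState int
}

// mergeCase names which of the four outcomes described above would apply.
func mergeCase(a, b node) string {
	switch {
	case a == b:
		return "same graph: reuse the left operand"
	case a.returnState == b.returnState:
		return "equal stack tops: merge the parents into one singleton"
	case a.parent == b.parent:
		return "different tops, shared parent: array node [a,b] over that parent"
	default:
		return "different tops, different parents: array node [ax, by]"
	}
}

func main() {
	x := &node{returnState: 9}
	fmt.Println(mergeCase(node{x, 3}, node{x, 3})) // same graph
	fmt.Println(mergeCase(node{x, 3}, node{x, 5})) // shared parent
}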

-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// @param mergeCache -// / -func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.Hash(), b.Hash()) - if previous != nil { - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.Hash(), a.Hash()) - if previous != nil { - return previous.(PredictionContext) - } - } - - rootMerge := mergeRoot(a, b, rootIsWildcard) - if rootMerge != nil { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), rootMerge) - } - return rootMerge - } - if a.returnState == b.returnState { - parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) - // if parent is same as existing a or b parent or reduced to a parent, - // return it - if parent == a.parentCtx { - return a // ax + bx = ax, if a=b - } - if parent == b.parentCtx { - return b // ax + bx = bx, if a=b - } - // else: ax + ay = a'[x,y] - // merge parents x and y, giving array node with x,y then remainders - // of those graphs. dup a, a' points at merged array - // Newjoined parent so create Newsingleton pointing to it, a' - spc := SingletonBasePredictionContextCreate(parent, a.returnState) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), spc) - } - return spc - } - // a != b payloads differ - // see if we can collapse parents due to $+x parents if local ctx - var singleParent PredictionContext - if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + - // bx = - // [a,b]x - singleParent = a.parentCtx - } - if singleParent != nil { // parents are same - // sort payloads and use same parent - payloads := []int{a.returnState, b.returnState} - if a.returnState > b.returnState { - payloads[0] = b.returnState - payloads[1] = a.returnState - } - parents := []PredictionContext{singleParent, singleParent} - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), apc) - } - return apc - } - // parents differ and can't merge them. Just pack together - // into array can't merge. - // ax + by = [ax,by] - payloads := []int{a.returnState, b.returnState} - parents := []PredictionContext{a.parentCtx, b.parentCtx} - if a.returnState > b.returnState { // sort by payload - payloads[0] = b.returnState - payloads[1] = a.returnState - parents = []PredictionContext{b.parentCtx, a.parentCtx} - } - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), apc) - } - return apc -} - -// Handle case where at least one of {@code a} or {@code b} is -// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used -// to represent {@link //EMPTY}. -// -//

-// Local-Context Merges
-//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
-//
-// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY} return left graph.
-//
-// Special case of last merge if local context.
-//
-// Full-Context Merges
-//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
-//
-// Must keep all contexts {@link //EMPTY} in array is a special value (and
-// nil parent).
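// A compact sketch of the root-merge rules above. Booleans stand in for "this
// operand is the EMPTY ($) context"; this is illustrative pseudologic, not the
// vendored mergeRoot implementation.
package main

import "fmt"

func mergeRootRule(aIsEmpty, bIsEmpty, rootIsWildcard bool) string {
	switch {
	case rootIsWildcard && (aIsEmpty || bIsEmpty):
		return "$ is a superset of anything: result is $"
	case aIsEmpty && bIsEmpty:
		return "$ + $ = $"
	case aIsEmpty || bIsEmpty:
		return "full context: keep both, array [x,$] with a nil parent for $"
	default:
		return "no root merge applies: fall through to the general merge"
	}
}

func main() {
	fmt.Println(mergeRootRule(true, false, true))  // local-context merge
	fmt.Println(mergeRootRule(true, false, false)) // full-context merge
}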

-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// / -func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { - if rootIsWildcard { - if a == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // // + b =// - } - if b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // a +// =// - } - } else { - if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // $ + $ = $ - } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] - payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{b.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) - payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{a.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } - } - return nil -} - -// Merge two {@link ArrayBasePredictionContext} instances. -// -//

-// Different tops, different parents.
-//
-// Shared top, same parents.
-//
-// Shared top, different parents.
-//
-// Shared top, all shared parents.
-//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
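// The array merge above is essentially a two-pointer merge of the two sorted
// returnStates lists, collapsing equal stack tops into a single entry. A
// standalone sketch of just that walk, with parents and caching omitted;
// illustrative only, not the vendored implementation.
package main

import "fmt"

func mergeSortedPayloads(a, b []int) []int {
	merged := make([]int, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] == b[j]: // equal tops: yield one merged entry
			merged = append(merged, a[i])
			i++
			j++
		case a[i] < b[j]:
			merged = append(merged, a[i])
			i++
		default:
			merged = append(merged, b[j])
			j++
		}
	}
	merged = append(merged, a[i:]...) // copy whatever remains on either side
	merged = append(merged, b[j:]...)
	return merged
}

func main() {
	fmt.Println(mergeSortedPayloads([]int{1, 3, 7}, []int{3, 5})) // [1 3 5 7]
}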

-// / -func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.Hash(), b.Hash()) - if previous != nil { - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") - } - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.Hash(), a.Hash()) - if previous != nil { - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") - } - return previous.(PredictionContext) - } - } - // merge sorted payloads a + b => M - i := 0 // walks a - j := 0 // walks b - k := 0 // walks target M array - - mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) - mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) - // walk and merge to yield mergedParents, mergedReturnStates - for i < len(a.returnStates) && j < len(b.returnStates) { - aParent := a.parents[i] - bParent := b.parents[j] - if a.returnStates[i] == b.returnStates[j] { - // same payload (stack tops are equal), must yield merged singleton - payload := a.returnStates[i] - // $+$ = $ - bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil - axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax - // -> - // ax - if bothDollars || axAX { - mergedParents[k] = aParent // choose left - mergedReturnStates[k] = payload - } else { // ax+ay -> a'[x,y] - mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) - mergedParents[k] = mergedParent - mergedReturnStates[k] = payload - } - i++ // hop over left one as usual - j++ // but also Skip one in right side since we merge - } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M - mergedParents[k] = aParent - mergedReturnStates[k] = a.returnStates[i] - i++ - } else { // b > a, copy b[j] to M - mergedParents[k] = bParent - mergedReturnStates[k] = b.returnStates[j] - j++ - } - k++ - } - // copy over any payloads remaining in either array - if i < len(a.returnStates) { - for p := i; p < len(a.returnStates); p++ { - mergedParents[k] = a.parents[p] - mergedReturnStates[k] = a.returnStates[p] - k++ - } - } else { - for p := j; p < len(b.returnStates); p++ { - mergedParents[k] = b.parents[p] - mergedReturnStates[k] = b.returnStates[p] - k++ - } - } - // trim merged if we combined a few that had same stack tops - if k < len(mergedParents) { // write index < last position trim - if k == 1 { // for just one merged element, return singleton top - pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), pc) - } - return pc - } - mergedParents = mergedParents[0:k] - mergedReturnStates = mergedReturnStates[0:k] - } - - M := NewArrayPredictionContext(mergedParents, mergedReturnStates) - - // if we created same array as a or b, return that instead - // TODO: track whether this is possible above during merge sort for speed - // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. 
This could be causing allocation problems - if M == a { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), a) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") - } - return a - } - if M == b { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), b) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") - } - return b - } - combineCommonParents(mergedParents) - - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), M) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String()) - } - return M -} - -// Make pass over all M {@code parents} merge any {@code equals()} -// ones. -// / -func combineCommonParents(parents []PredictionContext) { - uniqueParents := make(map[PredictionContext]PredictionContext) - - for p := 0; p < len(parents); p++ { - parent := parents[p] - if uniqueParents[parent] == nil { - uniqueParents[parent] = parent - } - } - for q := 0; q < len(parents); q++ { - parents[q] = uniqueParents[parents[q]] - } -} - -func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { - - if context.isEmpty() { - return context - } - existing := visited[context] - if existing != nil { - return existing - } - existing = contextCache.Get(context) - if existing != nil { - visited[context] = existing - return existing - } - changed := false - parents := make([]PredictionContext, context.length()) - for i := 0; i < len(parents); i++ { - parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) - if changed || parent != context.GetParent(i) { - if !changed { - parents = make([]PredictionContext, context.length()) - for j := 0; j < context.length(); j++ { - parents[j] = context.GetParent(j) - } - changed = true - } - parents[i] = parent - } - } - if !changed { - contextCache.add(context) - visited[context] = context - return context - } - var updated PredictionContext - if len(parents) == 0 { - updated = BasePredictionContextEMPTY - } else if len(parents) == 1 { - updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) - } else { - updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) - } - contextCache.add(updated) - visited[updated] = updated - visited[context] = updated - - return updated -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go deleted file mode 100644 index 7b9b72fab1..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// This enumeration defines the prediction modes available in ANTLR 4 along with -// utility methods for analyzing configuration sets for conflicts and/or -// ambiguities. - -const ( - // - // The SLL(*) prediction mode. This prediction mode ignores the current - // parser context when making predictions. This is the fastest prediction - // mode, and provides correct results for many grammars. 
This prediction - // mode is more powerful than the prediction mode provided by ANTLR 3, but - // may result in syntax errors for grammar and input combinations which are - // not SLL. - // - //

- // When using this prediction mode, the parser will either return a correct
- // parse tree (i.e. the same parse tree that would be returned with the
- // {@link //LL} prediction mode), or it will Report a syntax error. If a
- // syntax error is encountered when using the {@link //SLL} prediction mode,
- // it may be due to either an actual syntax error in the input or indicate
- // that the particular combination of grammar and input requires the more
- // powerful {@link //LL} prediction abilities to complete successfully.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.

- // - PredictionModeSLL = 0 - // - // The LL(*) prediction mode. This prediction mode allows the current parser - // context to be used for resolving SLL conflicts that occur during - // prediction. This is the fastest prediction mode that guarantees correct - // parse results for all combinations of grammars with syntactically correct - // inputs. - // - //

- // When using this prediction mode, the parser will make correct decisions
- // for all syntactically-correct grammar and input combinations. However, in
- // cases where the grammar is truly ambiguous this prediction mode might not
- // Report a precise answer for exactly which alternatives are
- // ambiguous.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.

- // - PredictionModeLL = 1 - // - // The LL(*) prediction mode with exact ambiguity detection. In addition to - // the correctness guarantees provided by the {@link //LL} prediction mode, - // this prediction mode instructs the prediction algorithm to determine the - // complete and exact set of ambiguous alternatives for every ambiguous - // decision encountered while parsing. - // - //

- // This prediction mode may be used for diagnosing ambiguities during
- // grammar development. Due to the performance overhead of calculating sets
- // of ambiguous alternatives, this prediction mode should be avoided when
- // the exact results are not necessary.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.

- // - PredictionModeLLExactAmbigDetection = 2 -) - -// Computes the SLL prediction termination condition. -// -//

-// This method computes the SLL prediction termination condition for both of
-// the following cases.
-//
-// COMBINED SLL+LL PARSING
-//
-// When LL-fallback is enabled upon SLL conflict, correct predictions are
-// ensured regardless of how the termination condition is computed by this
-// method. Due to the substantially higher cost of LL prediction, the
-// prediction should only fall back to LL when the additional lookahead
-// cannot lead to a unique SLL prediction.
-//
-// Assuming combined SLL+LL parsing, an SLL configuration set with only
-// conflicting subsets should fall back to full LL, even if the
-// configuration sets don't resolve to the same alternative (e.g.
-// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
-// configuration, SLL could continue with the hopes that more lookahead will
-// resolve via one of those non-conflicting configurations.
-//
-// Here's the prediction termination rule them: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
-//
-// HEURISTIC
-//
-// As a heuristic, we stop prediction when we see any conflicting subset
-// unless we see a state that only has one alternative associated with it.
-// The single-alt-state thing lets prediction continue upon rules like
-// (otherwise, it would admit defeat too soon):
-//
-// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }
-//
-// When the ATN simulation reaches the state before {@code ”}, it has a
-// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
-// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
-// processing this node because alternative to has another way to continue,
-// via {@code [6|2|[]]}.
-//
-// It also let's us continue for this rule:
-//
-// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
-//
-// After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not stop
-// working on this state. In the previous example, we're concerned with
-// states associated with the conflicting alternatives. Here alt 3 is not
-// associated with the conflicting configs, but since we can continue
-// looking for input reasonably, don't declare the state done.
-//
-// PURE SLL PARSING
-//
-// To handle pure SLL parsing, all we have to do is make sure that we
-// combine stack contexts for configurations that differ only by semantic
-// predicate. From there, we can do the usual SLL termination heuristic.
-//
-// PREDICATES IN SLL+LL PARSING
-//
-// SLL decisions don't evaluate predicates until after they reach DFA stop
-// states because they need to create the DFA cache that works in all
-// semantic situations. In contrast, full LL evaluates predicates collected
-// during start state computation so it can ignore predicates thereafter.
-// This means that SLL termination detection can totally ignore semantic
-// predicates.
-//
-// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
-// semantic predicate contexts so we might see two configurations like the
-// following.
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p})}
-//
-// Before testing these configurations against others, we have to merge
-// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x”} when looking for conflicts in
-// the following configurations.
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}
-//
-// If the configuration set has predicates (as indicated by
-// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
-// the configurations to strip out all of the predicates so that a standard
-// {@link ATNConfigSet} will merge everything ignoring predicates.
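// A standalone model of the SLL stop rule above: group alternatives by
// (state, context), then stop only when every subset conflicts and no state is
// associated with a single alternative. The types here are simplified
// stand-ins, not the runtime's ATNConfig/ATNConfigSet.
package main

import "fmt"

type config struct {
	state, alt int
	ctx        string // stand-in for the merged stack context
}

type stateCtx struct {
	state int
	ctx   string
}

func sllStop(configs []config) bool {
	byStateCtx := map[stateCtx]map[int]bool{}
	byState := map[int]map[int]bool{}
	for _, c := range configs {
		k := stateCtx{c.state, c.ctx}
		if byStateCtx[k] == nil {
			byStateCtx[k] = map[int]bool{}
		}
		byStateCtx[k][c.alt] = true
		if byState[c.state] == nil {
			byState[c.state] = map[int]bool{}
		}
		byState[c.state][c.alt] = true
	}
	hasConflict := false
	for _, alts := range byStateCtx {
		if len(alts) > 1 {
			hasConflict = true
		}
	}
	for _, alts := range byState {
		if len(alts) == 1 { // the single-alt-state heuristic keeps prediction going
			return false
		}
	}
	return hasConflict
}

func main() {
	fmt.Println(sllStop([]config{{12, 1, "[]"}, {6, 2, "[]"}, {12, 2, "[]"}})) // false: state 6 has one alt
	fmt.Println(sllStop([]config{{12, 1, "[]"}, {12, 2, "[]"}}))               // true: only conflicting subsets
}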

-func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to Match additional input so we terminate prediction. - // - if PredictionModeallConfigsInRuleStopStates(configs) { - return true - } - // pure SLL mode parsing - if mode == PredictionModeSLL { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL costs more time - // since we'll often fail over anyway. - if configs.HasSemanticContext() { - // dup configs, tossing out semantic predicates - dup := NewBaseATNConfigSet(false) - for _, c := range configs.GetItems() { - - // NewBaseATNConfig({semanticContext:}, c) - c = NewBaseATNConfig2(c, SemanticContextNone) - dup.Add(c, nil) - } - configs = dup - } - // now we have combined contexts for configs with dissimilar preds - } - // pure SLL or combined SLL+LL mode parsing - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) -} - -// Checks if any configuration in {@code configs} is in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if any configuration in {@code configs} is in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); ok { - return true - } - } - return false -} - -// Checks if all configurations in {@code configs} are in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if all configurations in {@code configs} are in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { - - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); !ok { - return false - } - } - return true -} - -// Full LL prediction termination. -// -//

-// Can we stop looking ahead during ATN simulation or is there some
-// uncertainty as to which alternative we will ultimately pick, after
-// consuming more input? Even if there are partial conflicts, we might know
-// that everything is going to resolve to the same minimum alternative. That
-// means we can stop since no more lookahead will change that fact. On the
-// other hand, there might be multiple conflicts that resolve to different
-// minimums. That means we need more look ahead to decide which of those
-// alternatives we should predict.
-//
-// The basic idea is to split the set of configurations {@code C}, into
-// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
-// non-conflicting configurations. Two configurations conflict if they have
-// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
-// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
-// and {@code (s, j, ctx, _)} for {@code i!=j}.
-//
-// Reduce these configuration subsets to the set of possible alternatives.
-// You can compute the alternative subsets in one pass as follows:
-//
-// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
-// {@code C} holding {@code s} and {@code ctx} fixed.
-//
-// Or in pseudo-code, for each configuration {@code c} in {@code C}:
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
-// The values in {@code map} are the set of {@code A_s,ctx} sets.
-//
-// If {@code |A_s,ctx|=1} then there is no conflict associated with
-// {@code s} and {@code ctx}.
-//
-// Reduce the subsets to singletons by choosing a minimum of each subset. If
-// the union of these alternative subsets is a singleton, then no amount of
-// more lookahead will help us. We will always pick that alternative. If,
-// however, there is more than one alternative, then we are uncertain which
-// alternative to predict and must continue looking for resolution. We may
-// or may not discover an ambiguity in the future, even if there are no
-// conflicting subsets this round.
-//
-// The biggest sin is to terminate early because it means we've made a
-// decision but were uncertain as to the eventual outcome. We haven't used
-// enough lookahead. On the other hand, announcing a conflict too late is no
-// big deal you will still have the conflict. It's just inefficient. It
-// might even look until the end of file.
-//
-// No special consideration for semantic predicates is required because
-// predicates are evaluated on-the-fly for full LL prediction, ensuring that
-// no configuration contains a semantic context during the termination
-// check.
-//
-// CONFLICTING CONFIGS
-//
-// Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
-// when {@code i!=j} but {@code x=x'}. Because we merge all
-// {@code (s, i, _)} configurations together, that means that there are at
-// most {@code n} configurations associated with state {@code s} for
-// {@code n} possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts {@code x} and
-// {@code x'}. Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either {@code x} or
-// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
-// is the superset, then {@code i} is the only possible prediction since the
-// others resolve to {@code min(i)} as well. However, if {@code x} is
-// associated with {@code j>i} then at least one stack configuration for
-// {@code j} is not in conflict with alternative {@code i}. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.
-//
-// For simplicity, I'm doing a equality check between {@code x} and
-// {@code x'} that lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
-//
-// CONTINUE/STOP RULE
-//
-// Continue if union of resolved alternative sets from non-conflicting and
-// conflicting alternative subsets has more than one alternative. We are
-// uncertain about which alternative to predict.
-//
-// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
-// alternatives are still in the running for the amount of input we've
-// consumed at this point. The conflicting sets let us to strip away
-// configurations that won't lead to more states because we resolve
-// conflicts to the configuration with a minimum alternate for the
-// conflicting set.
-//
-// CASES
-//
-// EXACT AMBIGUITY DETECTION
-//
-// If all states Report the same conflicting set of alternatives, then we
-// know we have the exact ambiguity set.
-//
-// |A_i|>1 and
-// A_i = A_j for all i, j.
-//
-// In other words, we continue examining lookahead until all {@code A_i}
-// have more than one alternative and all {@code A_i} are the same. If
-// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
-// because the resolved set is {@code {1}}. To determine what the real
-// ambiguity is, we have to know whether the ambiguity is between one and
-// two or one and three so we keep going. We can only stop prediction when
-// we need exact ambiguity detection when the sets look like
-// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
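// A standalone sketch of the full-LL stop rule above: take the minimum
// alternative of every alternative subset; prediction can stop only when all
// subsets resolve to the same minimum. Plain int slices stand in for the
// runtime's BitSets, and 0 stands in for ATNInvalidAltNumber.
package main

import "fmt"

const invalidAlt = 0

func resolvesToJustOneViableAlt(altSubsets [][]int) int {
	result := invalidAlt
	for _, subset := range altSubsets {
		if len(subset) == 0 {
			continue // sketch assumption: subsets are normally non-empty
		}
		m := subset[0]
		for _, alt := range subset {
			if alt < m {
				m = alt
			}
		}
		if result == invalidAlt {
			result = m
		} else if result != m { // subsets resolve to different minimums
			return invalidAlt // uncertain: keep looking ahead
		}
	}
	return result
}

func main() {
	fmt.Println(resolvesToJustOneViableAlt([][]int{{1, 2}, {1, 3}})) // 1: stop and predict alt 1
	fmt.Println(resolvesToJustOneViableAlt([][]int{{1, 2}, {3, 4}})) // 0: keep consuming lookahead
}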

-func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { - return PredictionModegetSingleViableAlt(altsets) -} - -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { - return !PredictionModehasNonConflictingAltSet(altsets) -} - -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() == 1 { - return true - } - } - return false -} - -// Determines if any single alternative subset in {@code altsets} contains -// more than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() > 1 { - return true - } - } - return false -} - -// Determines if every alternative subset in {@code altsets} is equivalent. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} -func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if first == nil { - first = alts - } else if alts != first { - return false - } - } - - return true -} - -// Returns the unique alternative predicted by all alternative subsets in -// {@code altsets}. If no such alternative exists, this method returns -// {@link ATN//INVALID_ALT_NUMBER}. -// -// @param altsets a collection of alternative subsets -func PredictionModegetUniqueAlt(altsets []*BitSet) int { - all := PredictionModeGetAlts(altsets) - if all.length() == 1 { - return all.minValue() - } - - return ATNInvalidAltNumber -} - -// Gets the complete set of represented alternatives for a collection of -// alternative subsets. This method returns the union of each {@link BitSet} -// in {@code altsets}. -// -// @param altsets a collection of alternative subsets -// @return the set of represented alternatives in {@code altsets} -func PredictionModeGetAlts(altsets []*BitSet) *BitSet { - all := NewBitSet() - for _, alts := range altsets { - all.or(alts) - } - return all -} - -// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. -// For each configuration {@code c} in {@code configs}: -// -//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-// 
-func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst) - - for _, c := range configs.GetItems() { - - alts, ok := configToAlts.Get(c) - if !ok { - alts = NewBitSet() - configToAlts.Put(c, alts) - } - alts.add(c.GetAlt()) - } - - return configToAlts.Values() -} - -// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each -// configuration {@code c} in {@code configs}: -// -//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-// 
-func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { - m := NewAltDict() - - for _, c := range configs.GetItems() { - alts := m.Get(c.GetState().String()) - if alts == nil { - alts = NewBitSet() - m.put(c.GetState().String(), alts) - } - alts.(*BitSet).add(c.GetAlt()) - } - return m -} - -func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { - values := PredictionModeGetStateToAltMap(configs).values() - for i := 0; i < len(values); i++ { - if values[i].(*BitSet).length() == 1 { - return true - } - } - return false -} - -func PredictionModegetSingleViableAlt(altsets []*BitSet) int { - result := ATNInvalidAltNumber - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - minAlt := alts.minValue() - if result == ATNInvalidAltNumber { - result = minAlt - } else if result != minAlt { // more than 1 viable alt - return ATNInvalidAltNumber - } - } - return result -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go deleted file mode 100644 index 210699ba23..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// A rule context is a record of a single rule invocation. It knows -// which context invoked it, if any. If there is no parent context, then -// naturally the invoking state is not valid. The parent link -// provides a chain upwards from the current rule invocation to the root -// of the invocation tree, forming a stack. We actually carry no -// information about the rule associated with b context (except -// when parsing). We keep only the state number of the invoking state from -// the ATN submachine that invoked b. Contrast b with the s -// pointer inside ParserRuleContext that tracks the current state -// being "executed" for the current rule. -// -// The parent contexts are useful for computing lookahead sets and -// getting error information. -// -// These objects are used during parsing and prediction. -// For the special case of parsers, we use the subclass -// ParserRuleContext. -// -// @see ParserRuleContext -// - -type RuleContext interface { - RuleNode - - GetInvokingState() int - SetInvokingState(int) - - GetRuleIndex() int - IsEmpty() bool - - GetAltNumber() int - SetAltNumber(altNumber int) - - String([]string, RuleContext) string -} - -type BaseRuleContext struct { - parentCtx RuleContext - invokingState int - RuleIndex int -} - -func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext { - - rn := new(BaseRuleContext) - - // What context invoked b rule? - rn.parentCtx = parent - - // What state invoked the rule associated with b context? - // The "return address" is the followState of invokingState - // If parent is nil, b should be -1. 
- if parent == nil { - rn.invokingState = -1 - } else { - rn.invokingState = invokingState - } - - return rn -} - -func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { - return b -} - -func (b *BaseRuleContext) SetParent(v Tree) { - if v == nil { - b.parentCtx = nil - } else { - b.parentCtx = v.(RuleContext) - } -} - -func (b *BaseRuleContext) GetInvokingState() int { - return b.invokingState -} - -func (b *BaseRuleContext) SetInvokingState(t int) { - b.invokingState = t -} - -func (b *BaseRuleContext) GetRuleIndex() int { - return b.RuleIndex -} - -func (b *BaseRuleContext) GetAltNumber() int { - return ATNInvalidAltNumber -} - -func (b *BaseRuleContext) SetAltNumber(altNumber int) {} - -// A context is empty if there is no invoking state meaning nobody call -// current context. -func (b *BaseRuleContext) IsEmpty() bool { - return b.invokingState == -1 -} - -// Return the combined text of all child nodes. This method only considers -// tokens which have been added to the parse tree. -//

-// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of b -// method. -// - -func (b *BaseRuleContext) GetParent() Tree { - return b.parentCtx -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go deleted file mode 100644 index c9bd6751e3..0000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go +++ /dev/null @@ -1,235 +0,0 @@ -package antlr - -import "math" - -const ( - _initalCapacity = 16 - _initalBucketCapacity = 8 - _loadFactor = 0.75 -) - -type Set interface { - Add(value interface{}) (added interface{}) - Len() int - Get(value interface{}) (found interface{}) - Contains(value interface{}) bool - Values() []interface{} - Each(f func(interface{}) bool) -} - -type array2DHashSet struct { - buckets [][]Collectable[any] - hashcodeFunction func(interface{}) int - equalsFunction func(Collectable[any], Collectable[any]) bool - - n int // How many elements in set - threshold int // when to expand - - currentPrime int // jump by 4 primes each expand or whatever - initialBucketCapacity int -} - -func (as *array2DHashSet) Each(f func(interface{}) bool) { - if as.Len() < 1 { - return - } - - for _, bucket := range as.buckets { - for _, o := range bucket { - if o == nil { - break - } - if !f(o) { - return - } - } - } -} - -func (as *array2DHashSet) Values() []interface{} { - if as.Len() < 1 { - return nil - } - - values := make([]interface{}, 0, as.Len()) - as.Each(func(i interface{}) bool { - values = append(values, i) - return true - }) - return values -} - -func (as *array2DHashSet) Contains(value Collectable[any]) bool { - return as.Get(value) != nil -} - -func (as *array2DHashSet) Add(value Collectable[any]) interface{} { - if as.n > as.threshold { - as.expand() - } - return as.innerAdd(value) -} - -func (as *array2DHashSet) expand() { - old := as.buckets - - as.currentPrime += 4 - - var ( - newCapacity = len(as.buckets) << 1 - newTable = as.createBuckets(newCapacity) - newBucketLengths = make([]int, len(newTable)) - ) - - as.buckets = newTable - as.threshold = int(float64(newCapacity) * _loadFactor) - - for _, bucket := range old { - if bucket == nil { - continue - } - - for _, o := range bucket { - if o == nil { - break - } - - b := as.getBuckets(o) - bucketLength := newBucketLengths[b] - var newBucket []Collectable[any] - if bucketLength == 0 { - // new bucket - newBucket = as.createBucket(as.initialBucketCapacity) - newTable[b] = newBucket - } else { - newBucket = newTable[b] - if bucketLength == len(newBucket) { - // expand - newBucketCopy := make([]Collectable[any], len(newBucket)<<1) - copy(newBucketCopy[:bucketLength], newBucket) - newBucket = newBucketCopy - newTable[b] = newBucket - } - } - - newBucket[bucketLength] = o - newBucketLengths[b]++ - } - } -} - -func (as *array2DHashSet) Len() int { - return as.n -} - -func (as *array2DHashSet) Get(o Collectable[any]) interface{} { - if o == nil { - return nil - } - - b := as.getBuckets(o) - bucket := as.buckets[b] - if bucket == nil { // no bucket - return nil - } - - for _, e := range bucket { - if e == nil { - return nil // empty slot; not there - } - if as.equalsFunction(e, o) { - return e - } - } - - return nil -} - -func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} { - b := as.getBuckets(o) - - bucket := as.buckets[b] - - // new bucket - if bucket == nil { - bucket = as.createBucket(as.initialBucketCapacity) - 
bucket[0] = o - - as.buckets[b] = bucket - as.n++ - return o - } - - // look for it in bucket - for i := 0; i < len(bucket); i++ { - existing := bucket[i] - if existing == nil { // empty slot; not there, add. - bucket[i] = o - as.n++ - return o - } - - if as.equalsFunction(existing, o) { // found existing, quit - return existing - } - } - - // full bucket, expand and add to end - oldLength := len(bucket) - bucketCopy := make([]Collectable[any], oldLength<<1) - copy(bucketCopy[:oldLength], bucket) - bucket = bucketCopy - as.buckets[b] = bucket - bucket[oldLength] = o - as.n++ - return o -} - -func (as *array2DHashSet) getBuckets(value Collectable[any]) int { - hash := as.hashcodeFunction(value) - return hash & (len(as.buckets) - 1) -} - -func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] { - return make([][]Collectable[any], cap) -} - -func (as *array2DHashSet) createBucket(cap int) []Collectable[any] { - return make([]Collectable[any], cap) -} - -func newArray2DHashSetWithCap( - hashcodeFunction func(interface{}) int, - equalsFunction func(Collectable[any], Collectable[any]) bool, - initCap int, - initBucketCap int, -) *array2DHashSet { - if hashcodeFunction == nil { - hashcodeFunction = standardHashFunction - } - - if equalsFunction == nil { - equalsFunction = standardEqualsFunction - } - - ret := &array2DHashSet{ - hashcodeFunction: hashcodeFunction, - equalsFunction: equalsFunction, - - n: 0, - threshold: int(math.Floor(_initalCapacity * _loadFactor)), - - currentPrime: 1, - initialBucketCapacity: initBucketCap, - } - - ret.buckets = ret.createBuckets(initCap) - return ret -} - -func newArray2DHashSet( - hashcodeFunction func(interface{}) int, - equalsFunction func(Collectable[any], Collectable[any]) bool, -) *array2DHashSet { - return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity) -} diff --git a/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/vendor/github.com/antlr4-go/antlr/v4/.gitignore new file mode 100644 index 0000000000..38ea34ff51 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/.gitignore @@ -0,0 +1,18 @@ +### Go template + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + + +# Go workspace file +go.work + +# No Goland stuff in this repo +.idea diff --git a/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/vendor/github.com/antlr4-go/antlr/v4/LICENSE new file mode 100644 index 0000000000..a22292eb5a --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither name of copyright holders nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/antlr4-go/antlr/v4/README.md b/vendor/github.com/antlr4-go/antlr/v4/README.md new file mode 100644 index 0000000000..03e5b83eb1 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/README.md @@ -0,0 +1,54 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/antlr4-go/antlr?style=flat-square)](https://goreportcard.com/report/github.com/antlr4-go/antlr) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/github.com/antlr4-go/antlr)](https://pkg.go.dev/github.com/antlr4-go/antlr) +[![Release](https://img.shields.io/github/v/release/antlr4-go/antlr?sort=semver&style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Release](https://img.shields.io/github/go-mod/go-version/antlr4-go/antlr?style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg?style=flat-square)](https://github.com/antlr4-go/antlr/commit-activity) +[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) +[![GitHub stars](https://img.shields.io/github/stars/antlr4-go/antlr?style=flat-square&label=Star&maxAge=2592000)](https://GitHub.com/Naereen/StrapDown.js/stargazers/) +# ANTLR4 Go Runtime Module Repo + +IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here. + + - Do not submit PRs or any change requests to this repo + - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR + - This repo contains the Go runtime that your generated projects should import + +## Introduction + +This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained +at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create +the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here. + +The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically. + +### Why? + +The `go get` command is unable to retrieve the Go runtime when it is embedded so +deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime, +does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and +causes confusion. 
+ +For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4` is retrieved by go get as: + +```sh +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc +) +``` + +Where you would expect to see: + +```sh +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0 +) +``` + +The decision was taken to create a separate org in a separate repo to hold the official Go runtime for ANTLR and +from whence users can expect `go get` to behave as expected. + + +# Documentation +Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on +migrating existing projects to use the new module location and for information on how to use the Go runtime in +general. diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go new file mode 100644 index 0000000000..48bd362bf5 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go @@ -0,0 +1,102 @@ +/* +Package antlr implements the Go version of the ANTLR 4 runtime. + +# The ANTLR Tool + +ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, +or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. +From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface +(or visitor) that makes it easy to respond to the recognition of phrases of interest. + +# Go Runtime + +At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime +source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of +ANTLR4 that it is compatible with (I.E. uses the /v4 path). + +However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root +of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code. +This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not +list the release tag such as @4.13.1 - this was confusing, to say the least. + +As of 4.13.0, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr` +(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information, +which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs]. + +This means that if you are using the source code without modules, you should also use the source code in the [new repo]. +Though we highly recommend that you use go modules, as they are now idiomatic for Go. + +I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good. + +Go runtime author: [Jim Idle] jimi@idle.ws + +# Code Generation + +ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a +runtime library, written specifically to support the generated code in the target language. This library is the +runtime for the Go target. + +To generate code for the go target, it is generally recommended to place the source grammar files in a package of +their own, and use the `.sh` script method of generating code, using the go generate directive. 
In that same directory +it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean +that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other +way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in +your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as +it was at any point in its history. + +Here is a general/recommended template for an ANTLR based recognizer in Go: + + . + ├── parser + │ ├── mygrammar.g4 + │ ├── antlr-4.13.1-complete.jar + │ ├── generate.go + │ └── generate.sh + ├── parsing - generated code goes here + │ └── error_listeners.go + ├── go.mod + ├── go.sum + ├── main.go + └── main_test.go + +Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in. + +The generate.go file then looks like this: + + package parser + + //go:generate ./generate.sh + +And the generate.sh file will look similar to this: + + #!/bin/sh + + alias antlr4='java -Xmx500M -cp "./antlr4-4.13.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4 + +depending on whether you want visitors or listeners or any other ANTLR options. Not that another option here +is to generate the code into a + +From the command line at the root of your source package (location of go.mo)d) you can then simply issue the command: + + go generate ./... + +Which will generate the code for the parser, and place it in the parsing package. You can then use the generated code +by importing the parsing package. + +There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like. + +# Copyright Notice + +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. + +Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. + +[target languages]: https://github.com/antlr/antlr4/tree/master/runtime +[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt +[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md +[new repo]: https://github.com/antlr4-go/antlr +[Jim Idle]: https://github.com/jimidle +[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md +*/ +package antlr diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go b/vendor/github.com/antlr4-go/antlr/v4/atn.go similarity index 94% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go rename to vendor/github.com/antlr4-go/antlr/v4/atn.go index 98010d2e6e..e749ebd0cf 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn.go @@ -4,8 +4,6 @@ package antlr -import "sync" - // ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or // which is invalid for a particular struct such as [*antlr.BaseRuleContext] var ATNInvalidAltNumber int @@ -20,10 +18,11 @@ var ATNInvalidAltNumber int // [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf // [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network type ATN struct { - // DecisionToState is the decision points for all rules, subrules, optional - // blocks, ()+, ()*, etc. 
Each subrule/rule is a decision point, and we must track them so we + + // DecisionToState is the decision points for all rules, sub-rules, optional + // blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we // can go back later and build DFA predictors for them. This includes - // all the rules, subrules, optional blocks, ()+, ()* etc... + // all the rules, sub-rules, optional blocks, ()+, ()* etc... DecisionToState []DecisionState // grammarType is the ATN type and is used for deserializing ATNs from strings. @@ -51,11 +50,13 @@ type ATN struct { // specified, and otherwise is nil. ruleToTokenType []int + // ATNStates is a list of all states in the ATN, ordered by state number. + // states []ATNState - mu sync.Mutex - stateMu sync.RWMutex - edgeMu sync.RWMutex + mu Mutex + stateMu RWMutex + edgeMu RWMutex } // NewATN returns a new ATN struct representing the given grammarType and is used diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go new file mode 100644 index 0000000000..267308bb3d --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go @@ -0,0 +1,332 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +const ( + lexerConfig = iota // Indicates that this ATNConfig is for a lexer + parserConfig // Indicates that this ATNConfig is for a parser +) + +// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic +// context). The syntactic context is a graph-structured stack node whose +// path(s) to the root is the rule invocation(s) chain used to arrive in the +// state. The semantic context is the tree of semantic predicates encountered +// before reaching an ATN state. +type ATNConfig struct { + precedenceFilterSuppressed bool + state ATNState + alt int + context *PredictionContext + semanticContext SemanticContext + reachesIntoOuterContext int + cType int // lexerConfig or parserConfig + lexerActionExecutor *LexerActionExecutor + passedThroughNonGreedyDecision bool +} + +// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only +func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig { + return NewATNConfig5(state, alt, context, SemanticContextNone) +} + +// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context +func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Necessary? 
+ } + + pac := &ATNConfig{} + pac.state = state + pac.alt = alt + pac.context = context + pac.semanticContext = semanticContext + pac.cType = parserConfig + return pac +} + +// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only +func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig { + return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) +} + +// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context +func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig { + return NewATNConfig(c, state, c.GetContext(), semanticContext) +} + +// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a context only +func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig { + return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext) +} + +// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only +func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig { + return NewATNConfig(c, state, context, c.GetSemanticContext()) +} + +// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors' +// are just wrappers around this one. +func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { + b := &ATNConfig{} + b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext) + b.cType = parserConfig + return b +} + +func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) { + + a.state = state + a.alt = alt + a.context = context + a.semanticContext = semanticContext + a.reachesIntoOuterContext = c.GetReachesIntoOuterContext() + a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed() +} + +func (a *ATNConfig) getPrecedenceFilterSuppressed() bool { + return a.precedenceFilterSuppressed +} + +func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) { + a.precedenceFilterSuppressed = v +} + +// GetState returns the ATN state associated with this configuration +func (a *ATNConfig) GetState() ATNState { + return a.state +} + +// GetAlt returns the alternative associated with this configuration +func (a *ATNConfig) GetAlt() int { + return a.alt +} + +// SetContext sets the rule invocation stack associated with this configuration +func (a *ATNConfig) SetContext(v *PredictionContext) { + a.context = v +} + +// GetContext returns the rule invocation stack associated with this configuration +func (a *ATNConfig) GetContext() *PredictionContext { + return a.context +} + +// GetSemanticContext returns the semantic context associated with this configuration +func (a *ATNConfig) GetSemanticContext() SemanticContext { + return a.semanticContext +} + +// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration +func (a *ATNConfig) GetReachesIntoOuterContext() int { + return a.reachesIntoOuterContext +} + +// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration +func (a *ATNConfig) SetReachesIntoOuterContext(v int) { + a.reachesIntoOuterContext = v +} + +// Equals is the default comparison function for an ATNConfig when no specialist implementation is required +// for a collection. 
+// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. +func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool { + switch a.cType { + case lexerConfig: + return a.LEquals(o) + case parserConfig: + return a.PEquals(o) + default: + panic("Invalid ATNConfig type") + } +} + +// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required +// for a collection. +// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. +func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool { + var other, ok = o.(*ATNConfig) + + if !ok { + return false + } + if a == other { + return true + } else if other == nil { + return false + } + + var equal bool + + if a.context == nil { + equal = other.context == nil + } else { + equal = a.context.Equals(other.context) + } + + var ( + nums = a.state.GetStateNumber() == other.state.GetStateNumber() + alts = a.alt == other.alt + cons = a.semanticContext.Equals(other.semanticContext) + sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed + ) + + return nums && alts && cons && sups && equal +} + +// Hash is the default hash function for a parser ATNConfig, when no specialist hash function +// is required for a collection +func (a *ATNConfig) Hash() int { + switch a.cType { + case lexerConfig: + return a.LHash() + case parserConfig: + return a.PHash() + default: + panic("Invalid ATNConfig type") + } +} + +// PHash is the default hash function for a parser ATNConfig, when no specialist hash function +// is required for a collection +func (a *ATNConfig) PHash() int { + var c int + if a.context != nil { + c = a.context.Hash() + } + + h := murmurInit(7) + h = murmurUpdate(h, a.state.GetStateNumber()) + h = murmurUpdate(h, a.alt) + h = murmurUpdate(h, c) + h = murmurUpdate(h, a.semanticContext.Hash()) + return murmurFinish(h, 4) +} + +// String returns a string representation of the ATNConfig, usually used for debugging purposes +func (a *ATNConfig) String() string { + var s1, s2, s3 string + + if a.context != nil { + s1 = ",[" + fmt.Sprint(a.context) + "]" + } + + if a.semanticContext != SemanticContextNone { + s2 = "," + fmt.Sprint(a.semanticContext) + } + + if a.reachesIntoOuterContext > 0 { + s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext) + } + + return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3) +} + +func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.state = state + lac.alt = alt + lac.context = context + lac.semanticContext = SemanticContextNone + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = c.lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +func 
NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = c.lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +//goland:noinspection GoUnusedExportedFunction +func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.state = state + lac.alt = alt + lac.context = context + lac.semanticContext = SemanticContextNone + lac.cType = lexerConfig + return lac +} + +// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (a *ATNConfig) LHash() int { + var f int + if a.passedThroughNonGreedyDecision { + f = 1 + } else { + f = 0 + } + h := murmurInit(7) + h = murmurUpdate(h, a.state.GetStateNumber()) + h = murmurUpdate(h, a.alt) + h = murmurUpdate(h, a.context.Hash()) + h = murmurUpdate(h, a.semanticContext.Hash()) + h = murmurUpdate(h, f) + h = murmurUpdate(h, a.lexerActionExecutor.Hash()) + h = murmurFinish(h, 6) + return h +} + +// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool { + var otherT, ok = other.(*ATNConfig) + if !ok { + return false + } else if a == otherT { + return true + } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision { + return false + } + + switch { + case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil: + return true + case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil: + if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) { + return false + } + default: + return false // One but not both, are nil + } + + return a.PEquals(otherT) +} + +func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool { + var ds, ok = target.(DecisionState) + + return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go new file mode 100644 index 0000000000..52dbaf8064 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go @@ -0,0 +1,301 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// ATNConfigSet is a specialized set of ATNConfig that tracks information +// about its elements and can combine similar configurations using a +// graph-structured stack. +type ATNConfigSet struct { + cachedHash int + + // configLookup is used to determine whether two ATNConfigSets are equal. We + // need all configurations with the same (s, i, _, semctx) to be equal. A key + // effectively doubles the number of objects associated with ATNConfigs. All + // keys are hashed by (s, i, _, pi), not including the context. Wiped out when + // read-only because a set becomes a DFA state. 
+ configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]] + + // configs is the added elements that did not match an existing key in configLookup + configs []*ATNConfig + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves re-computation. Can we track conflicts as they + // are added to save scanning configs later? + conflictingAlts *BitSet + + // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates + // we hit a pred while computing a closure operation. Do not make a DFA state + // from the ATNConfigSet in this case. TODO: How is this used by parsers? + dipsIntoOuterContext bool + + // fullCtx is whether it is part of a full context LL prediction. Used to + // determine how to merge $. It is a wildcard with SLL, but not for an LL + // context merge. + fullCtx bool + + // Used in parser and lexer. In lexer, it indicates we hit a pred + // while computing a closure operation. Don't make a DFA state from this set. + hasSemanticContext bool + + // readOnly is whether it is read-only. Do not + // allow any code to manipulate the set if true because DFA states will point at + // sets and those must not change. It not, protect other fields; conflictingAlts + // in particular, which is assigned after readOnly. + readOnly bool + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves re-computation. Can we track conflicts as they + // are added to save scanning configs later? + uniqueAlt int +} + +// Alts returns the combined set of alts for all the configurations in this set. +func (b *ATNConfigSet) Alts() *BitSet { + alts := NewBitSet() + for _, it := range b.configs { + alts.add(it.GetAlt()) + } + return alts +} + +// NewATNConfigSet creates a new ATNConfigSet instance. +func NewATNConfigSet(fullCtx bool) *ATNConfigSet { + return &ATNConfigSet{ + cachedHash: -1, + configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"), + fullCtx: fullCtx, + } +} + +// Add merges contexts with existing configs for (s, i, pi, _), +// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and +// 'pi' is the [ATNConfig].semanticContext. +// +// We use (s,i,pi) as the key. +// Updates dipsIntoOuterContext and hasSemanticContext when necessary. +func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool { + if b.readOnly { + panic("set is read-only") + } + + if config.GetSemanticContext() != SemanticContextNone { + b.hasSemanticContext = true + } + + if config.GetReachesIntoOuterContext() > 0 { + b.dipsIntoOuterContext = true + } + + existing, present := b.configLookup.Put(config) + + // The config was not already in the set + // + if !present { + b.cachedHash = -1 + b.configs = append(b.configs, config) // Track order here + return true + } + + // Merge a previous (s, i, pi, _) with it and save the result + rootIsWildcard := !b.fullCtx + merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) + + // No need to check for existing.context because config.context is in the cache, + // since the only way to create new graphs is the "call rule" and here. We cache + // at both places. 
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) + + // Preserve the precedence filter suppression during the merge + if config.getPrecedenceFilterSuppressed() { + existing.setPrecedenceFilterSuppressed(true) + } + + // Replace the context because there is no need to do alt mapping + existing.SetContext(merged) + + return true +} + +// GetStates returns the set of states represented by all configurations in this config set +func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] { + + // states uses the standard comparator and Hash() provided by the ATNState instance + // + states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()") + + for i := 0; i < len(b.configs); i++ { + states.Put(b.configs[i].GetState()) + } + + return states +} + +func (b *ATNConfigSet) GetPredicates() []SemanticContext { + predicates := make([]SemanticContext, 0) + + for i := 0; i < len(b.configs); i++ { + c := b.configs[i].GetSemanticContext() + + if c != SemanticContextNone { + predicates = append(predicates, c) + } + } + + return predicates +} + +func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { + if b.readOnly { + panic("set is read-only") + } + + // Empty indicate no optimization is possible + if b.configLookup == nil || b.configLookup.Len() == 0 { + return + } + + for i := 0; i < len(b.configs); i++ { + config := b.configs[i] + config.SetContext(interpreter.getCachedContext(config.GetContext())) + } +} + +func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool { + for i := 0; i < len(coll); i++ { + b.Add(coll[i], nil) + } + + return false +} + +// Compare The configs are only equal if they are in the same order and their Equals function returns true. +// Java uses ArrayList.equals(), which requires the same order. 
+func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool { + if len(b.configs) != len(bs.configs) { + return false + } + for i := 0; i < len(b.configs); i++ { + if !b.configs[i].Equals(bs.configs[i]) { + return false + } + } + + return true +} + +func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool { + if b == other { + return true + } else if _, ok := other.(*ATNConfigSet); !ok { + return false + } + + other2 := other.(*ATNConfigSet) + var eca bool + switch { + case b.conflictingAlts == nil && other2.conflictingAlts == nil: + eca = true + case b.conflictingAlts != nil && other2.conflictingAlts != nil: + eca = b.conflictingAlts.equals(other2.conflictingAlts) + } + return b.configs != nil && + b.fullCtx == other2.fullCtx && + b.uniqueAlt == other2.uniqueAlt && + eca && + b.hasSemanticContext == other2.hasSemanticContext && + b.dipsIntoOuterContext == other2.dipsIntoOuterContext && + b.Compare(other2) +} + +func (b *ATNConfigSet) Hash() int { + if b.readOnly { + if b.cachedHash == -1 { + b.cachedHash = b.hashCodeConfigs() + } + + return b.cachedHash + } + + return b.hashCodeConfigs() +} + +func (b *ATNConfigSet) hashCodeConfigs() int { + h := 1 + for _, config := range b.configs { + h = 31*h + config.Hash() + } + return h +} + +func (b *ATNConfigSet) Contains(item *ATNConfig) bool { + if b.readOnly { + panic("not implemented for read-only sets") + } + if b.configLookup == nil { + return false + } + return b.configLookup.Contains(item) +} + +func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool { + return b.Contains(item) +} + +func (b *ATNConfigSet) Clear() { + if b.readOnly { + panic("set is read-only") + } + b.configs = make([]*ATNConfig, 0) + b.cachedHash = -1 + b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()") +} + +func (b *ATNConfigSet) String() string { + + s := "[" + + for i, c := range b.configs { + s += c.String() + + if i != len(b.configs)-1 { + s += ", " + } + } + + s += "]" + + if b.hasSemanticContext { + s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) + } + + if b.uniqueAlt != ATNInvalidAltNumber { + s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) + } + + if b.conflictingAlts != nil { + s += ",conflictingAlts=" + b.conflictingAlts.String() + } + + if b.dipsIntoOuterContext { + s += ",dipsIntoOuterContext" + } + + return s +} + +// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair +// for use in lexers. 
+func NewOrderedATNConfigSet() *ATNConfigSet { + return &ATNConfigSet{ + cachedHash: -1, + // This set uses the standard Hash() and Equals() from ATNConfig + configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"), + fullCtx: false, + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go similarity index 86% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go index 3c975ec7bf..bdb30b3622 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go @@ -20,7 +20,7 @@ func (opts *ATNDeserializationOptions) ReadOnly() bool { func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.readOnly = readOnly } @@ -31,7 +31,7 @@ func (opts *ATNDeserializationOptions) VerifyATN() bool { func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.verifyATN = verifyATN } @@ -42,11 +42,12 @@ func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool { func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.generateRuleBypassTransitions = generateRuleBypassTransitions } +//goland:noinspection GoUnusedExportedFunction func DefaultATNDeserializationOptions() *ATNDeserializationOptions { return NewATNDeserializationOptions(&defaultATNDeserializationOptions) } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go similarity index 97% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go index 3888856b4b..2dcb9ae11b 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go @@ -35,6 +35,7 @@ func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { return &ATNDeserializer{options: options} } +//goland:noinspection GoUnusedFunction func stringInSlice(a string, list []string) int { for i, b := range list { if b == a { @@ -193,7 +194,7 @@ func (a *ATNDeserializer) readModes(atn *ATN) { } } -func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet { +func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet { m := a.readInt() // Preallocate the needed capacity. 
@@ -350,7 +351,7 @@ func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { bypassStart.endState = bypassStop - atn.defineDecisionState(bypassStart.BaseDecisionState) + atn.defineDecisionState(&bypassStart.BaseDecisionState) bypassStop.startState = bypassStart @@ -450,7 +451,7 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { continue } - // We analyze the ATN to determine if a ATN decision state is the + // We analyze the [ATN] to determine if an ATN decision state is the // decision for the closure block that determines whether a // precedence rule should continue or complete. if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { @@ -553,7 +554,7 @@ func (a *ATNDeserializer) readInt() int { return int(v) // data is 32 bits but int is at least that big } -func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { +func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { target := atn.states[trg] switch typeIndex { diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go similarity index 66% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go index 41529115fa..afe6c9f809 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go @@ -4,7 +4,7 @@ package antlr -var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) +var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false)) type IATNSimulator interface { SharedContextCache() *PredictionContextCache @@ -18,22 +18,13 @@ type BaseATNSimulator struct { decisionToDFA []*DFA } -func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { - b := new(BaseATNSimulator) - - b.atn = atn - b.sharedContextCache = sharedContextCache - - return b -} - -func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { +func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext { if b.sharedContextCache == nil { return context } - visited := make(map[PredictionContext]PredictionContext) - + //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()") + visited := NewVisitRecord() return getCachedBasePredictionContext(context, b.sharedContextCache, visited) } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go similarity index 65% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_state.go index 1f2a56bc31..2ae5807cdb 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go @@ -4,7 +4,11 @@ package antlr -import "strconv" +import ( + "fmt" + "os" + "strconv" +) // Constants for serialization. 
const ( @@ -25,6 +29,7 @@ const ( ATNStateInvalidStateNumber = -1 ) +//goland:noinspection GoUnusedGlobalVariable var ATNStateInitialNumTransitions = 4 type ATNState interface { @@ -73,7 +78,7 @@ type BaseATNState struct { transitions []Transition } -func NewBaseATNState() *BaseATNState { +func NewATNState() *BaseATNState { return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} } @@ -148,27 +153,46 @@ func (as *BaseATNState) AddTransition(trans Transition, index int) { if len(as.transitions) == 0 { as.epsilonOnlyTransitions = trans.getIsEpsilon() } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { + _, _ = fmt.Fprintf(os.Stdin, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber) as.epsilonOnlyTransitions = false } + // TODO: Check code for already present compared to the Java equivalent + //alreadyPresent := false + //for _, t := range as.transitions { + // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() { + // if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) { + // alreadyPresent = true + // break + // } + // } else if t.getIsEpsilon() && trans.getIsEpsilon() { + // alreadyPresent = true + // break + // } + //} + //if !alreadyPresent { if index == -1 { as.transitions = append(as.transitions, trans) } else { as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) // TODO: as.transitions.splice(index, 1, trans) } + //} else { + // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber) + //} } type BasicState struct { - *BaseATNState + BaseATNState } func NewBasicState() *BasicState { - b := NewBaseATNState() - - b.stateType = ATNStateBasic - - return &BasicState{BaseATNState: b} + return &BasicState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + } } type DecisionState interface { @@ -182,13 +206,19 @@ type DecisionState interface { } type BaseDecisionState struct { - *BaseATNState + BaseATNState decision int nonGreedy bool } func NewBaseDecisionState() *BaseDecisionState { - return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1} + return &BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + decision: -1, + } } func (s *BaseDecisionState) getDecision() int { @@ -216,12 +246,20 @@ type BlockStartState interface { // BaseBlockStartState is the start of a regular (...) block. 
type BaseBlockStartState struct { - *BaseDecisionState + BaseDecisionState endState *BlockEndState } func NewBlockStartState() *BaseBlockStartState { - return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} + return &BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + decision: -1, + }, + } } func (s *BaseBlockStartState) getEndState() *BlockEndState { @@ -233,31 +271,38 @@ func (s *BaseBlockStartState) setEndState(b *BlockEndState) { } type BasicBlockStartState struct { - *BaseBlockStartState + BaseBlockStartState } func NewBasicBlockStartState() *BasicBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateBlockStart - - return &BasicBlockStartState{BaseBlockStartState: b} + return &BasicBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockStart, + }, + }, + }, + } } var _ BlockStartState = &BasicBlockStartState{} // BlockEndState is a terminal node of a simple (a|b|c) block. type BlockEndState struct { - *BaseATNState + BaseATNState startState ATNState } func NewBlockEndState() *BlockEndState { - b := NewBaseATNState() - - b.stateType = ATNStateBlockEnd - - return &BlockEndState{BaseATNState: b} + return &BlockEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockEnd, + }, + startState: nil, + } } // RuleStopState is the last node in the ATN for a rule, unless that rule is the @@ -265,43 +310,48 @@ func NewBlockEndState() *BlockEndState { // encode references to all calls to this rule to compute FOLLOW sets for error // handling. type RuleStopState struct { - *BaseATNState + BaseATNState } func NewRuleStopState() *RuleStopState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStop - - return &RuleStopState{BaseATNState: b} + return &RuleStopState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStop, + }, + } } type RuleStartState struct { - *BaseATNState + BaseATNState stopState ATNState isPrecedenceRule bool } func NewRuleStartState() *RuleStartState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStart - - return &RuleStartState{BaseATNState: b} + return &RuleStartState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStart, + }, + } } // PlusLoopbackState is a decision state for A+ and (A|B)+. It has two // transitions: one to the loop back to start of the block, and one to exit. type PlusLoopbackState struct { - *BaseDecisionState + BaseDecisionState } func NewPlusLoopbackState() *PlusLoopbackState { - b := NewBaseDecisionState() - - b.stateType = ATNStatePlusLoopBack - - return &PlusLoopbackState{BaseDecisionState: b} + return &PlusLoopbackState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusLoopBack, + }, + }, + } } // PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a @@ -309,85 +359,103 @@ func NewPlusLoopbackState() *PlusLoopbackState { // it is included for completeness. In reality, PlusLoopbackState is the real // decision-making node for A+. 
type PlusBlockStartState struct { - *BaseBlockStartState + BaseBlockStartState loopBackState ATNState } func NewPlusBlockStartState() *PlusBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStatePlusBlockStart - - return &PlusBlockStartState{BaseBlockStartState: b} + return &PlusBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusBlockStart, + }, + }, + }, + } } var _ BlockStartState = &PlusBlockStartState{} // StarBlockStartState is the block that begins a closure loop. type StarBlockStartState struct { - *BaseBlockStartState + BaseBlockStartState } func NewStarBlockStartState() *StarBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateStarBlockStart - - return &StarBlockStartState{BaseBlockStartState: b} + return &StarBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarBlockStart, + }, + }, + }, + } } var _ BlockStartState = &StarBlockStartState{} type StarLoopbackState struct { - *BaseATNState + BaseATNState } func NewStarLoopbackState() *StarLoopbackState { - b := NewBaseATNState() - - b.stateType = ATNStateStarLoopBack - - return &StarLoopbackState{BaseATNState: b} + return &StarLoopbackState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopBack, + }, + } } type StarLoopEntryState struct { - *BaseDecisionState + BaseDecisionState loopBackState ATNState precedenceRuleDecision bool } func NewStarLoopEntryState() *StarLoopEntryState { - b := NewBaseDecisionState() - - b.stateType = ATNStateStarLoopEntry - // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. - return &StarLoopEntryState{BaseDecisionState: b} + return &StarLoopEntryState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopEntry, + }, + }, + } } // LoopEndState marks the end of a * or + loop. type LoopEndState struct { - *BaseATNState + BaseATNState loopBackState ATNState } func NewLoopEndState() *LoopEndState { - b := NewBaseATNState() - - b.stateType = ATNStateLoopEnd - - return &LoopEndState{BaseATNState: b} + return &LoopEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateLoopEnd, + }, + } } // TokensStartState is the Tokens rule start state linking to each lexer rule start state. 
type TokensStartState struct { - *BaseDecisionState + BaseDecisionState } func NewTokensStartState() *TokensStartState { - b := NewBaseDecisionState() - - b.stateType = ATNStateTokenStart - - return &TokensStartState{BaseDecisionState: b} + return &TokensStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateTokenStart, + }, + }, + } } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go b/vendor/github.com/antlr4-go/antlr/v4/atn_type.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_type.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go similarity index 89% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go rename to vendor/github.com/antlr4-go/antlr/v4/char_stream.go index c33f0adb5e..bd8127b6b5 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go +++ b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go @@ -8,5 +8,5 @@ type CharStream interface { IntStream GetText(int, int) string GetTextFromTokens(start, end Token) string - GetTextFromInterval(*Interval) string + GetTextFromInterval(Interval) string } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go rename to vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go similarity index 88% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go rename to vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go index c6c9485a20..b75da9df08 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go +++ b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go @@ -28,22 +28,24 @@ type CommonTokenStream struct { // trivial with bt field. fetchedEOF bool - // index indexs into tokens of the current token (next token to consume). + // index into [tokens] of the current token (next token to consume). // tokens[p] should be LT(1). It is set to -1 when the stream is first // constructed or when SetTokenSource is called, indicating that the first token // has not yet been fetched from the token source. For additional information, - // see the documentation of IntStream for a description of initializing methods. + // see the documentation of [IntStream] for a description of initializing methods. index int - // tokenSource is the TokenSource from which tokens for the bt stream are + // tokenSource is the [TokenSource] from which tokens for the bt stream are // fetched. tokenSource TokenSource - // tokens is all tokens fetched from the token source. The list is considered a + // tokens contains all tokens fetched from the token source. The list is considered a // complete view of the input once fetchedEOF is set to true. tokens []Token } +// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce +// tokens and will pull tokens from the given lexer channel. 
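+//
+// A typical wiring is sketched below. MyLexer and MyParser stand in for generated recognizer types, and the
+// parsing package name is an assumption; neither is part of this runtime:
+//
+//	input := antlr.NewInputStream("some input text")
+//	lexer := parsing.NewMyLexer(input)
+//	stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
+//	p := parsing.NewMyParser(stream)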
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { return &CommonTokenStream{ channel: channel, @@ -53,6 +55,7 @@ func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { } } +// GetAllTokens returns all tokens currently pulled from the token source. func (c *CommonTokenStream) GetAllTokens() []Token { return c.tokens } @@ -61,9 +64,11 @@ func (c *CommonTokenStream) Mark() int { return 0 } -func (c *CommonTokenStream) Release(marker int) {} +func (c *CommonTokenStream) Release(_ int) {} -func (c *CommonTokenStream) reset() { +func (c *CommonTokenStream) Reset() { + c.fetchedEOF = false + c.tokens = make([]Token, 0) c.Seek(0) } @@ -107,7 +112,7 @@ func (c *CommonTokenStream) Consume() { // Sync makes sure index i in tokens has a token and returns true if a token is // located at index i and otherwise false. func (c *CommonTokenStream) Sync(i int) bool { - n := i - len(c.tokens) + 1 // TODO: How many more elements do we need? + n := i - len(c.tokens) + 1 // How many more elements do we need? if n > 0 { fetched := c.fetch(n) @@ -193,12 +198,13 @@ func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { c.tokenSource = tokenSource c.tokens = make([]Token, 0) c.index = -1 + c.fetchedEOF = false } // NextTokenOnChannel returns the index of the next token on channel given a // starting index. Returns i if tokens[i] is on channel. Returns -1 if there are -// no tokens on channel between i and EOF. -func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { +// no tokens on channel between 'i' and [TokenEOF]. +func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int { c.Sync(i) if i >= len(c.tokens) { @@ -244,7 +250,7 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) from := tokenIndex + 1 - // If no onchannel to the right, then nextOnChannel == -1, so set to to last token + // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token var to int if nextOnChannel == -1 { @@ -314,7 +320,8 @@ func (c *CommonTokenStream) Index() int { } func (c *CommonTokenStream) GetAllText() string { - return c.GetTextFromInterval(nil) + c.Fill() + return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1)) } func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { @@ -329,15 +336,9 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string return c.GetTextFromInterval(interval.GetSourceInterval()) } -func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { +func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string { c.lazyInit() - - if interval == nil { - c.Fill() - interval = NewInterval(0, len(c.tokens)-1) - } else { - c.Sync(interval.Stop) - } + c.Sync(interval.Stop) start := interval.Start stop := interval.Stop diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go b/vendor/github.com/antlr4-go/antlr/v4/comparators.go similarity index 82% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go rename to vendor/github.com/antlr4-go/antlr/v4/comparators.go index 9ea3200536..7467e9b43d 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go +++ b/vendor/github.com/antlr4-go/antlr/v4/comparators.go @@ -18,17 +18,20 @@ package antlr // type safety and avoid having to implement this for every type that we want to perform comparison on. 
// // This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which -// allows us to use it in any collection instance that does nto require a special hash or equals implementation. +// allows us to use it in any collection instance that does not require a special hash or equals implementation. type ObjEqComparator[T Collectable[T]] struct{} var ( - aStateEqInst = &ObjEqComparator[ATNState]{} - aConfEqInst = &ObjEqComparator[ATNConfig]{} - aConfCompInst = &ATNConfigComparator[ATNConfig]{} - atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{} + aStateEqInst = &ObjEqComparator[ATNState]{} + aConfEqInst = &ObjEqComparator[*ATNConfig]{} + + // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache + aConfCompInst = &ATNConfigComparator[*ATNConfig]{} + atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{} dfaStateEqInst = &ObjEqComparator[*DFAState]{} semctxEqInst = &ObjEqComparator[SemanticContext]{} - atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{} + atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{} + pContextEqInst = &ObjEqComparator[*PredictionContext]{} ) // Equals2 delegates to the Equals() method of type T @@ -44,14 +47,14 @@ func (c *ObjEqComparator[T]) Hash1(o T) int { type SemCComparator[T Collectable[T]] struct{} -// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet +// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet // and has a custom Equals() and Hash() implementation, because equality is not based on the // standard Hash() and Equals() methods of the ATNConfig type. type ATNConfigComparator[T Collectable[T]] struct { } // Equals2 is a custom comparator for ATNConfigs specifically for configLookup -func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { +func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { // Same pointer, must be equal, even if both nil // @@ -72,7 +75,8 @@ func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { } // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup -func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int { +func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int { + hash := 7 hash = 31*hash + o.GetState().GetStateNumber() hash = 31*hash + o.GetAlt() @@ -85,7 +89,7 @@ type ATNAltConfigComparator[T Collectable[T]] struct { } // Equals2 is a custom comparator for ATNConfigs specifically for configLookup -func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { +func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { // Same pointer, must be equal, even if both nil // @@ -105,21 +109,21 @@ func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { } // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup -func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int { +func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int { h := murmurInit(7) h = murmurUpdate(h, o.GetState().GetStateNumber()) h = murmurUpdate(h, o.GetContext().Hash()) return murmurFinish(h, 2) } -// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet +// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet // and has a custom Equals() and Hash() implementation, because equality is not based on the // standard Hash() and Equals() methods of the ATNConfig type. 
type BaseATNConfigComparator[T Collectable[T]] struct { } // Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet -func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { +func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { // Same pointer, must be equal, even if both nil // @@ -141,7 +145,6 @@ func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just // delegates to the standard Hash() method of the ATNConfig type. -func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int { - +func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int { return o.Hash() } diff --git a/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/vendor/github.com/antlr4-go/antlr/v4/configuration.go new file mode 100644 index 0000000000..c2b724514d --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/configuration.go @@ -0,0 +1,214 @@ +package antlr + +type runtimeConfiguration struct { + statsTraceStacks bool + lexerATNSimulatorDebug bool + lexerATNSimulatorDFADebug bool + parserATNSimulatorDebug bool + parserATNSimulatorTraceATNSim bool + parserATNSimulatorDFADebug bool + parserATNSimulatorRetryDebug bool + lRLoopEntryBranchOpt bool + memoryManager bool +} + +// Global runtime configuration +var runtimeConfig = runtimeConfiguration{ + lRLoopEntryBranchOpt: true, +} + +type runtimeOption func(*runtimeConfiguration) error + +// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options. +// It uses the functional options pattern for go. This is a package global function as it operates on the runtime +// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is +// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the +// only maintainer). However, it is possible that you might want to use this to set a global option concerning the +// memory allocation type used by the runtime such as sync.Pool or not. +// +// The options are applied in the order they are passed in, so the last option will override any previous options. +// +// For example, if you want to turn on the collection create point stack flag to true, you can do: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true)) +// +// If you want to turn it off, you can do: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false)) +func ConfigureRuntime(options ...runtimeOption) error { + for _, option := range options { + err := option(&runtimeConfig) + if err != nil { + return err + } + } + return nil +} + +// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of +// certain structs, such as collections, or the use point of certain methods such as Put(). +// Because this can be expensive, it is turned off by default. However, it +// can be useful to track down exactly where memory is being created and used. 
+// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false)) +func WithStatsTraceStacks(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.statsTraceStacks = trace + return nil + } +} + +// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false)) +func WithLexerATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDebug = debug + return nil + } +} + +// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false)) +func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDFADebug = debug + return nil + } +} + +// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false)) +func WithParserATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorDebug = debug + return nil + } +} + +// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator +// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false)) +func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorTraceATNSim = trace + return nil + } +} + +// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. 
+// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false)) +func WithParserATNSimulatorDFADebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorDFADebug = debug + return nil + } +} + +// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA] +// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime. +// Only useful to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false)) +func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorRetryDebug = debug + return nil + } +} + +// WithLRLoopEntryBranchOpt sets the global flag indicating whether let recursive loop operations should be +// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime. +// It turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator]. +// +// Note that default is to use this optimization. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false)) +func WithLRLoopEntryBranchOpt(off bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lRLoopEntryBranchOpt = off + return nil + } +} + +// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful +// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which +// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute +// for fixing your grammar by ridding yourself of extreme ambiguity. BUt if you are just trying to reuse an opensource +// grammar, this may help make it more practical. +// +// Note that default is to use normal Go memory allocation and not pool memory. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithMemoryManager(true)) +// +// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other +// and should remember to nil out any references to the parser or lexer when you are done with them. +func WithMemoryManager(use bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.memoryManager = use + return nil + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go b/vendor/github.com/antlr4-go/antlr/v4/dfa.go similarity index 76% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go rename to vendor/github.com/antlr4-go/antlr/v4/dfa.go index bfd43e1f73..6b63eb1589 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go +++ b/vendor/github.com/antlr4-go/antlr/v4/dfa.go @@ -4,6 +4,8 @@ package antlr +// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can +// reach and the transitions between them. 
type DFA struct { // atnStartState is the ATN state in which this was created atnStartState DecisionState @@ -12,10 +14,9 @@ type DFA struct { // states is all the DFA states. Use Map to get the old state back; Set can only // indicate whether it is there. Go maps implement key hash collisions and so on and are very - // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva + // good, but the DFAState is an object and can't be used directly as the key as it can in say Java // amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them - // to see if they really are the same object. - // + // to see if they really are the same object. Hence, we have our own map storage. // states *JStore[*DFAState, *ObjEqComparator[*DFAState]] @@ -32,11 +33,11 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA { dfa := &DFA{ atnStartState: atnStartState, decision: decision, - states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst), + states: nil, // Lazy initialize } if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision { dfa.precedenceDfa = true - dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false)) + dfa.s0 = NewDFAState(-1, NewATNConfigSet(false)) dfa.s0.isAcceptState = false dfa.s0.requiresFullContext = false } @@ -95,12 +96,11 @@ func (d *DFA) getPrecedenceDfa() bool { // true or nil otherwise, and d.precedenceDfa is updated. func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { if d.getPrecedenceDfa() != precedenceDfa { - d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst) + d.states = nil // Lazy initialize d.numstates = 0 if precedenceDfa { - precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) - + precedenceState := NewDFAState(-1, NewATNConfigSet(false)) precedenceState.setEdges(make([]*DFAState, 0)) precedenceState.isAcceptState = false precedenceState.requiresFullContext = false @@ -113,6 +113,31 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { } } +// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy +// instantiation of the states JMap. +func (d *DFA) Len() int { + if d.states == nil { + return 0 + } + return d.states.Len() +} + +// Get returns a state that matches s if it is present in the DFA state set. We defer to this +// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap. +func (d *DFA) Get(s *DFAState) (*DFAState, bool) { + if d.states == nil { + return nil, false + } + return d.states.Get(s) +} + +func (d *DFA) Put(s *DFAState) (*DFAState, bool) { + if d.states == nil { + d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put") + } + return d.states.Put(s) +} + func (d *DFA) getS0() *DFAState { return d.s0 } @@ -121,9 +146,11 @@ func (d *DFA) setS0(s *DFAState) { d.s0 = s } -// sortedStates returns the states in d sorted by their state number. +// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil. 
func (d *DFA) sortedStates() []*DFAState { - + if d.states == nil { + return []*DFAState{} + } vs := d.states.SortedSlice(func(i, j *DFAState) bool { return i.stateNumber < j.stateNumber }) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go similarity index 97% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go rename to vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go index 84d0a31e53..0e11009899 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go @@ -10,7 +10,7 @@ import ( "strings" ) -// DFASerializer is a DFA walker that knows how to dump them to serialized +// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized // strings. type DFASerializer struct { dfa *DFA diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go similarity index 81% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go rename to vendor/github.com/antlr4-go/antlr/v4/dfa_state.go index c90dec55c8..6541430745 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go +++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go @@ -22,30 +22,31 @@ func (p *PredPrediction) String() string { return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" } -// DFAState represents a set of possible ATN configurations. As Aho, Sethi, +// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi, // Ullman p. 117 says: "The DFA uses its state to keep track of all possible // states the ATN can be in after reading each input symbol. That is to say, -// after reading input a1a2..an, the DFA is in a state that represents the +// after reading input a1, a2,..an, the DFA is in a state that represents the // subset T of the states of the ATN that are reachable from the ATN's start -// state along some path labeled a1a2..an." In conventional NFA-to-DFA -// conversion, therefore, the subset T would be a bitset representing the set of -// states the ATN could be in. We need to track the alt predicted by each state +// state along some path labeled a1a2..an." +// +// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of +// states the [ATN] could be in. We need to track the alt predicted by each state // as well, however. More importantly, we need to maintain a stack of states, // tracking the closure operations as they jump from rule to rule, emulating // rule invocations (method calls). I have to add a stack to simulate the proper // lookahead sequences for the underlying LL grammar from which the ATN was // derived. // -// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a -// state (ala normal conversion) and a RuleContext describing the chain of rules +// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a +// state (ala normal conversion) and a [RuleContext] describing the chain of rules // (if any) followed to arrive at that state. 
// -// A DFAState may have multiple references to a particular state, but with -// different ATN contexts (with same or different alts) meaning that state was +// A [DFAState] may have multiple references to a particular state, but with +// different [ATN] contexts (with same or different alts) meaning that state was // reached via a different set of rule invocations. type DFAState struct { stateNumber int - configs ATNConfigSet + configs *ATNConfigSet // edges elements point to the target of the symbol. Shift up by 1 so (-1) // Token.EOF maps to the first element. @@ -53,7 +54,7 @@ type DFAState struct { isAcceptState bool - // prediction is the ttype we match or alt we predict if the state is accept. + // prediction is the 'ttype' we match or alt we predict if the state is 'accept'. // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or // requiresFullContext. prediction int @@ -81,9 +82,9 @@ type DFAState struct { predicates []*PredPrediction } -func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { +func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState { if configs == nil { - configs = NewBaseATNConfigSet(false) + configs = NewATNConfigSet(false) } return &DFAState{configs: configs, stateNumber: stateNumber} @@ -94,7 +95,7 @@ func (d *DFAState) GetAltSet() []int { var alts []int if d.configs != nil { - for _, c := range d.configs.GetItems() { + for _, c := range d.configs.configs { alts = append(alts, c.GetAlt()) } } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go similarity index 92% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go rename to vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go index c55bcc19b2..bd2cd8bc3a 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go +++ b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go @@ -33,6 +33,7 @@ type DiagnosticErrorListener struct { exactOnly bool } +//goland:noinspection GoUnusedExportedFunction func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { n := new(DiagnosticErrorListener) @@ -42,7 +43,7 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { return n } -func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { if d.exactOnly && !exact { return } @@ -55,7 +56,7 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s recognizer.NotifyErrorListeners(msg, nil, nil) } -func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) { msg := "reportAttemptingFullContext d=" + d.getDecisionDescription(recognizer, dfa) + @@ -64,7 +65,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, recognizer.NotifyErrorListeners(msg, nil, nil) } -func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs 
ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) { msg := "reportContextSensitivity d=" + d.getDecisionDescription(recognizer, dfa) + ", input='" + @@ -96,12 +97,12 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa // @param configs The conflicting or ambiguous configuration set. // @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise // returns the set of alternatives represented in {@code configs}. -func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { +func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet { if ReportedAlts != nil { return ReportedAlts } result := NewBitSet() - for _, c := range set.GetItems() { + for _, c := range set.configs { result.add(c.GetAlt()) } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go similarity index 62% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go rename to vendor/github.com/antlr4-go/antlr/v4/error_listener.go index f679f0dcd5..21a0216434 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go +++ b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go @@ -16,28 +16,29 @@ import ( type ErrorListener interface { SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) - ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) - ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) - ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) + ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) + ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) + ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) } type DefaultErrorListener struct { } +//goland:noinspection GoUnusedExportedFunction func NewDefaultErrorListener() *DefaultErrorListener { return new(DefaultErrorListener) } -func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { +func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) { } -func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) { } -func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { +func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) { } -func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { +func (d *DefaultErrorListener) 
ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) { } type ConsoleErrorListener struct { @@ -48,21 +49,16 @@ func NewConsoleErrorListener() *ConsoleErrorListener { return new(ConsoleErrorListener) } -// Provides a default instance of {@link ConsoleErrorListener}. +// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}. var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() -// {@inheritDoc} +// SyntaxError prints messages to System.err containing the +// values of line, charPositionInLine, and msg using +// the following format: // -//

-// This implementation prints messages to {@link System//err} containing the
-// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
-// the following format.
-//
-// line line:charPositionInLine msg
-//
-func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) +// line : +func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) { + _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) } type ProxyErrorListener struct { @@ -85,19 +81,19 @@ func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol } } -func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { for _, d := range p.delegates { d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) } } -func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { +func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) { for _, d := range p.delegates { d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) } } -func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { +func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) { for _, d := range p.delegates { d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go similarity index 58% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go rename to vendor/github.com/antlr4-go/antlr/v4/error_strategy.go index 5c0a637ba4..9db2be1c74 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go +++ b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go @@ -21,8 +21,8 @@ type ErrorStrategy interface { ReportMatch(Parser) } -// This is the default implementation of {@link ANTLRErrorStrategy} used for -// error Reporting and recovery in ANTLR parsers. +// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for +// error reporting and recovery in ANTLR parsers. type DefaultErrorStrategy struct { errorRecoveryMode bool lastErrorIndex int @@ -46,7 +46,7 @@ func NewDefaultErrorStrategy() *DefaultErrorStrategy { // The index into the input stream where the last error occurred. // This is used to prevent infinite loops where an error is found // but no token is consumed during recovery...another error is found, - // ad nauseum. This is a failsafe mechanism to guarantee that at least + // ad nauseam. This is a failsafe mechanism to guarantee that at least // one token/tree node is consumed for two errors. // d.lastErrorIndex = -1 @@ -62,50 +62,37 @@ func (d *DefaultErrorStrategy) reset(recognizer Parser) { // This method is called to enter error recovery mode when a recognition // exception is Reported. 
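// The listener types above are normally wired up through a recognizer. A rough sketch, assuming
// the antlr and fmt imports and that the generated parser p exposes RemoveErrorListeners and
// AddErrorListener as elsewhere in the runtime:
//
//	type countingListener struct {
//		*antlr.DefaultErrorListener // inherit the no-op implementations
//		errs int
//	}
//
//	func (l *countingListener) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int,
//		msg string, _ antlr.RecognitionException) {
//		l.errs++
//		fmt.Printf("syntax error %d at %d:%d: %s\n", l.errs, line, column, msg)
//	}
//
//	p.RemoveErrorListeners() // drop ConsoleErrorListenerINSTANCE
//	p.AddErrorListener(&countingListener{DefaultErrorListener: antlr.NewDefaultErrorListener()})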
-// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { +func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) { d.errorRecoveryMode = true } -func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool { +func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool { return d.errorRecoveryMode } // This method is called to leave error recovery mode after recovering from // a recognition exception. -// -// @param recognizer -func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { +func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) { d.errorRecoveryMode = false d.lastErrorStates = nil d.lastErrorIndex = -1 } -// {@inheritDoc} -// -//

-// The default implementation simply calls {@link //endErrorCondition}.
+// ReportMatch is the default implementation of error matching and simply calls endErrorCondition. func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { d.endErrorCondition(recognizer) } -// {@inheritDoc} -// -//

-// The default implementation returns immediately if the handler is already
-// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
-// and dispatches the Reporting task based on the runtime type of {@code e}
-// according to the following table.
-//
-// {@link NoViableAltException}: Dispatches the call to
-// {@link //ReportNoViableAlternative}
-// {@link InputMisMatchException}: Dispatches the call to
-// {@link //ReportInputMisMatch}
-// {@link FailedPredicateException}: Dispatches the call to
-// {@link //ReportFailedPredicate}
-// All other types: calls {@link Parser//NotifyErrorListeners} to Report
-// the exception
-//
+// ReportError is the default implementation of error reporting. +// It returns immediately if the handler is already +// in error recovery mode. Otherwise, it calls [beginErrorCondition] +// and dispatches the Reporting task based on the runtime type of e +// according to the following table. +// +// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative] +// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch] +// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate] +// All other types : Calls [NotifyErrorListeners] to Report the exception func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { // if we've already Reported an error and have not Matched a token // yet successfully, don't Report any errors. @@ -128,12 +115,10 @@ func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionExcep } } -// {@inheritDoc} -// -//

-// The default implementation reSynchronizes the parser by consuming tokens
-// until we find one in the reSynchronization set--loosely the set of tokens
-// that can follow the current rule.
-func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { +// Recover is the default recovery implementation. +// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set - +// loosely the set of tokens that can follow the current rule. +func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) { if d.lastErrorIndex == recognizer.GetInputStream().Index() && d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { @@ -148,54 +133,58 @@ func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException d.lastErrorStates = NewIntervalSet() } d.lastErrorStates.addOne(recognizer.GetState()) - followSet := d.getErrorRecoverySet(recognizer) + followSet := d.GetErrorRecoverySet(recognizer) d.consumeUntil(recognizer, followSet) } -// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure -// that the current lookahead symbol is consistent with what were expecting -// at d point in the ATN. You can call d anytime but ANTLR only -// generates code to check before subrules/loops and each iteration. +// Sync is the default implementation of error strategy synchronization. +// +// This Sync makes sure that the current lookahead symbol is consistent with what were expecting +// at this point in the [ATN]. You can call this anytime but ANTLR only +// generates code to check before sub-rules/loops and each iteration. // -//

-// Implements Jim Idle's magic Sync mechanism in closures and optional
-// subrules. E.g.,
+// Implements [Jim Idle]'s magic Sync mechanism in closures and optional +// sub-rules. E.g.: // -//
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
-// 
+// a : Sync ( stuff Sync )* +// Sync : {consume to what can follow Sync} // -// At the start of a sub rule upon error, {@link //Sync} performs single +// At the start of a sub-rule upon error, Sync performs single // token deletion, if possible. If it can't do that, it bails on the current // rule and uses the default error recovery, which consumes until the // reSynchronization set of the current rule. // -//

-// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
-// with an empty alternative), then the expected set includes what follows
-// the subrule.
+// If the sub-rule is optional +// +// ({@code (...)?}, {@code (...)*}, // -//

-// During loop iteration, it consumes until it sees a token that can start a
-// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.
+// or a block with an empty alternative), then the expected set includes what follows +// the sub-rule. // -//

-// ORIGINS
+// During loop iteration, it consumes until it sees a token that can start a +// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to +// stay in the loop as long as possible. // -//

-// Previous versions of ANTLR did a poor job of their recovery within loops.
+// # Origins
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
 // A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule
+// out of the entire rules surrounding the loop. So, for rule: // -//
-// classfunc : 'class' ID '{' member* '}'
-// 
+// classfunc : 'class' ID '{' member* '}' // // input with an extra token between members would force the parser to // consume until it found the next class definition rather than the next // member definition of the current class. // -//

-// This functionality cost a little bit of effort because the parser has to
-// compare token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off d
-// functionality by simply overriding d method as a blank { }.
+// This functionality cost a bit of effort because the parser has to +// compare the token set at the start of the loop and at each iteration. If for +// some reason speed is suffering for you, you can turn off this +// functionality by simply overriding this method as empty: +// +// { } +// +// [Jim Idle]: https://github.com/jimidle func (d *DefaultErrorStrategy) Sync(recognizer Parser) { // If already recovering, don't try to Sync if d.InErrorRecoveryMode(recognizer) { @@ -217,25 +206,21 @@ func (d *DefaultErrorStrategy) Sync(recognizer Parser) { if d.SingleTokenDeletion(recognizer) != nil { return } - panic(NewInputMisMatchException(recognizer)) + recognizer.SetError(NewInputMisMatchException(recognizer)) case ATNStatePlusLoopBack, ATNStateStarLoopBack: d.ReportUnwantedToken(recognizer) expecting := NewIntervalSet() expecting.addSet(recognizer.GetExpectedTokens()) - whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) + whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer)) d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) default: // do nothing if we can't identify the exact kind of ATN state } } -// This is called by {@link //ReportError} when the exception is a -// {@link NoViableAltException}. -// -// @see //ReportError +// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException]. // -// @param recognizer the parser instance -// @param e the recognition exception +// See also [ReportError] func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { tokens := recognizer.GetTokenStream() var input string @@ -252,48 +237,38 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N recognizer.NotifyErrorListeners(msg, e.offendingToken, e) } -// This is called by {@link //ReportError} when the exception is an -// {@link InputMisMatchException}. +// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException] // -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { - msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + +// See also: [ReportError] +func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { + msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) + " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) recognizer.NotifyErrorListeners(msg, e.offendingToken, e) } -// This is called by {@link //ReportError} when the exception is a -// {@link FailedPredicateException}. -// -// @see //ReportError +// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException]. 
// -// @param recognizer the parser instance -// @param e the recognition exception +// See also: [ReportError] func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] msg := "rule " + ruleName + " " + e.message recognizer.NotifyErrorListeners(msg, e.offendingToken, e) } -// This method is called to Report a syntax error which requires the removal +// ReportUnwantedToken is called to report a syntax error that requires the removal // of a token from the input stream. At the time d method is called, the -// erroneous symbol is current {@code LT(1)} symbol and has not yet been -// removed from the input stream. When d method returns, -// {@code recognizer} is in error recovery mode. +// erroneous symbol is the current LT(1) symbol and has not yet been +// removed from the input stream. When this method returns, +// recognizer is in error recovery mode. // -//

-// This method is called when {@link //singleTokenDeletion} identifies
+// This method is called when singleTokenDeletion identifies
 // single-token deletion as a viable recovery strategy for a mismatched
-// input error.
+// input error. // -//

-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
 // enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-// -// @param recognizer the parser instance +// [NotifyErrorListeners] func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { if d.InErrorRecoveryMode(recognizer) { return @@ -307,21 +282,18 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { recognizer.NotifyErrorListeners(msg, t, nil) } -// This method is called to Report a syntax error which requires the -// insertion of a missing token into the input stream. At the time d -// method is called, the missing token has not yet been inserted. When d -// method returns, {@code recognizer} is in error recovery mode. +// ReportMissingToken is called to report a syntax error which requires the +// insertion of a missing token into the input stream. At the time this +// method is called, the missing token has not yet been inserted. When this +// method returns, recognizer is in error recovery mode. // -//

-// This method is called when {@link //singleTokenInsertion} identifies
+// This method is called when singleTokenInsertion identifies
 // single-token insertion as a viable recovery strategy for a mismatched
-// input error.
+// input error. // -//

-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-// -// @param recognizer the parser instance +// The default implementation simply returns if the handler is already in +// error recovery mode. Otherwise, it calls beginErrorCondition to +// enter error recovery mode, followed by calling [NotifyErrorListeners] func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { if d.InErrorRecoveryMode(recognizer) { return @@ -334,54 +306,48 @@ func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { recognizer.NotifyErrorListeners(msg, t, nil) } -//

-// The default implementation attempts to recover from the mismatched input
+// The RecoverInline default implementation attempts to recover from the mismatched input
 // by using single token insertion and deletion as described below. If the
-// recovery attempt fails, d method panics an
-// {@link InputMisMatchException}.
+// recovery attempt fails, this method panics with [InputMisMatchException}. +// TODO: Not sure that panic() is the right thing to do here - JI // -//

-// EXTRA TOKEN (single token deletion)
+// # EXTRA TOKEN (single token deletion) // -//

-// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
-// right token, however, then assume {@code LA(1)} is some extra spurious
+// LA(1) is not what we are looking for. If LA(2) has the
+// right token, however, then assume LA(1) is some extra spurious
 // token and delete it. Then consume and return the next token (which was
-// the {@code LA(2)} token) as the successful result of the Match operation.
+// the LA(2) token) as the successful result of the Match operation. // -//

-// This recovery strategy is implemented by {@link
-// //singleTokenDeletion}.
+// # This recovery strategy is implemented by singleTokenDeletion // -//

-// MISSING TOKEN (single token insertion)
+// # MISSING TOKEN (single token insertion) // -//

-// If current token (at {@code LA(1)}) is consistent with what could come
-// after the expected {@code LA(1)} token, then assume the token is missing
-// and use the parser's {@link TokenFactory} to create it on the fly. The
-// "insertion" is performed by returning the created token as the successful
-// result of the Match operation.
+// If current token -at LA(1) - is consistent with what could come +// after the expected LA(1) token, then assume the token is missing +// and use the parser's [TokenFactory] to create it on the fly. The +// “insertion” is performed by returning the created token as the successful +// result of the Match operation. // -//

-// This recovery strategy is implemented by {@link
-// //singleTokenInsertion}.
+// This recovery strategy is implemented by [SingleTokenInsertion]. // -//

-// EXAMPLE
+// # Example // -//

-// For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
-// the parser returns from the nested call to {@code expr}, it will have
-// call chain:
+// For example, Input i=(3 is clearly missing the ')'. When +// the parser returns from the nested call to expr, it will have +// call the chain: // -//
-// stat &rarr expr &rarr atom
-// 
+// stat → expr → atom // -// and it will be trying to Match the {@code ')'} at d point in the +// and it will be trying to Match the ')' at this point in the // derivation: // -//
-// => ID '=' '(' INT ')' ('+' atom)* ”
-// ^
-// 
+// : ID '=' '(' INT ')' ('+' atom)* ';' +// ^ // -// The attempt to Match {@code ')'} will fail when it sees {@code ”} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”} -// is in the set of tokens that can follow the {@code ')'} token reference -// in rule {@code atom}. It can assume that you forgot the {@code ')'}. +// The attempt to [Match] ')' will fail when it sees ';' and +// call [RecoverInline]. To recover, it sees that LA(1)==';' +// is in the set of tokens that can follow the ')' token reference +// in rule atom. It can assume that you forgot the ')'. func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { // SINGLE TOKEN DELETION MatchedSymbol := d.SingleTokenDeletion(recognizer) @@ -396,24 +362,24 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { return d.GetMissingSymbol(recognizer) } // even that didn't work must panic the exception - panic(NewInputMisMatchException(recognizer)) + recognizer.SetError(NewInputMisMatchException(recognizer)) + return nil } -// This method implements the single-token insertion inline error recovery -// strategy. It is called by {@link //recoverInline} if the single-token +// SingleTokenInsertion implements the single-token insertion inline error recovery +// strategy. It is called by [RecoverInline] if the single-token // deletion strategy fails to recover from the mismatched input. If this // method returns {@code true}, {@code recognizer} will be in error recovery // mode. // -//

-// This method determines whether or not single-token insertion is viable by
-// checking if the {@code LA(1)} input symbol could be successfully Matched
-// if it were instead the {@code LA(2)} symbol. If d method returns
+// This method determines whether single-token insertion is viable by
+// checking if the LA(1) input symbol could be successfully Matched
+// if it were instead the LA(2) symbol. If this method returns
 // {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce d behavior.
+// token with the correct type to produce this behavior.
// -// @param recognizer the parser instance -// @return {@code true} if single-token insertion is a viable recovery -// strategy for the current mismatched input, otherwise {@code false} +// This func returns true if single-token insertion is a viable recovery +// strategy for the current mismatched input. func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { currentSymbolType := recognizer.GetTokenStream().LA(1) // if current token is consistent with what could come after current @@ -431,23 +397,21 @@ func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { return false } -// This method implements the single-token deletion inline error recovery -// strategy. It is called by {@link //recoverInline} to attempt to recover +// SingleTokenDeletion implements the single-token deletion inline error recovery +// strategy. It is called by [RecoverInline] to attempt to recover // from mismatched input. If this method returns nil, the parser and error // handler state will not have changed. If this method returns non-nil, -// {@code recognizer} will not be in error recovery mode since the +// recognizer will not be in error recovery mode since the // returned token was a successful Match. // -//

-// If the single-token deletion is successful, d method calls
-// {@link //ReportUnwantedToken} to Report the error, followed by
-// {@link Parser//consume} to actually "delete" the extraneous token. Then,
-// before returning {@link //ReportMatch} is called to signal a successful
-// Match.
+// If the single-token deletion is successful, this method calls +// [ReportUnwantedToken] to Report the error, followed by +// [Consume] to actually “delete” the extraneous token. Then, +// before returning, [ReportMatch] is called to signal a successful +// Match. // -// @param recognizer the parser instance -// @return the successfully Matched {@link Token} instance if single-token -// deletion successfully recovers from the mismatched input, otherwise -// {@code nil} +// The func returns the successfully Matched [Token] instance if single-token +// deletion successfully recovers from the mismatched input, otherwise nil. func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { NextTokenType := recognizer.GetTokenStream().LA(2) expecting := d.GetExpectedTokens(recognizer) @@ -467,24 +431,28 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { return nil } -// Conjure up a missing token during error recovery. +// GetMissingSymbol conjures up a missing token during error recovery. // // The recognizer attempts to recover from single missing // symbols. But, actions might refer to that missing symbol. -// For example, x=ID {f($x)}. The action clearly assumes +// For example: +// +// x=ID {f($x)}. +// +// The action clearly assumes // that there has been an identifier Matched previously and that // $x points at that token. If that token is missing, but // the next token in the stream is what we want we assume that -// d token is missing and we keep going. Because we +// this token is missing, and we keep going. Because we // have to return some token to replace the missing token, // we have to conjure one up. This method gives the user control // over the tokens returned for missing tokens. Mostly, // you will want to create something special for identifier // tokens. For literals such as '{' and ',', the default // action in the parser or tree parser works. It simply creates -// a CommonToken of the appropriate type. The text will be the token. -// If you change what tokens must be created by the lexer, -// override d method to create the appropriate tokens. +// a [CommonToken] of the appropriate type. The text will be the token name. +// If you need to change which tokens must be created by the lexer, +// override this method to create the appropriate tokens. func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { currentSymbol := recognizer.GetCurrentToken() expecting := d.GetExpectedTokens(recognizer) @@ -498,7 +466,7 @@ func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { if expectedTokenType > 0 && expectedTokenType < len(ln) { tokenText = "" } else { - tokenText = "" // TODO matches the JS impl + tokenText = "" // TODO: matches the JS impl } } current := currentSymbol @@ -516,13 +484,13 @@ func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet return recognizer.GetExpectedTokens() } -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about +// GetTokenErrorDisplay determines how a token should be displayed in an error message. +// The default is to display just the text, but during development you might +// want to have a lot of information spit out. 
Override this func in that case +// to use t.String() (which, for [CommonToken], dumps everything about // the token). This is better than forcing you to override a method in // your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. +// so that it creates a new type. func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { if t == nil { return "" @@ -545,52 +513,57 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { return "'" + s + "'" } -// Compute the error recovery set for the current rule. During +// GetErrorRecoverySet computes the error recovery set for the current rule. During // rule invocation, the parser pushes the set of tokens that can -// follow that rule reference on the stack d amounts to +// follow that rule reference on the stack. This amounts to // computing FIRST of what follows the rule reference in the // enclosing rule. See LinearApproximator.FIRST(). +// // This local follow set only includes tokens // from within the rule i.e., the FIRST computation done by // ANTLR stops at the end of a rule. // -// # EXAMPLE +// # Example // // When you find a "no viable alt exception", the input is not // consistent with any of the alternatives for rule r. The best // thing to do is to consume tokens until you see something that -// can legally follow a call to r//or* any rule that called r. +// can legally follow a call to r or any rule that called r. // You don't want the exact set of viable next tokens because the // input might just be missing a token--you might consume the // rest of the input looking for one of the missing tokens. // -// Consider grammar: +// Consider the grammar: +// +// a : '[' b ']' +// | '(' b ')' +// ; // -// a : '[' b ']' -// | '(' b ')' +// b : c '^' INT +// ; // -// b : c '^' INT -// c : ID -// | INT +// c : ID +// | INT +// ; // // At each rule invocation, the set of tokens that could follow // that rule is pushed on a stack. Here are the various // context-sensitive follow sets: // -// FOLLOW(b1_in_a) = FIRST(']') = ']' -// FOLLOW(b2_in_a) = FIRST(')') = ')' -// FOLLOW(c_in_b) = FIRST('^') = '^' +// FOLLOW(b1_in_a) = FIRST(']') = ']' +// FOLLOW(b2_in_a) = FIRST(')') = ')' +// FOLLOW(c_in_b) = FIRST('^') = '^' // -// Upon erroneous input "[]", the call chain is +// Upon erroneous input “[]”, the call chain is // -// a -> b -> c +// a → b → c // // and, hence, the follow context stack is: // -// depth follow set start of rule execution -// 0 a (from main()) -// 1 ']' b -// 2 '^' c +// Depth Follow set Start of rule execution +// 0 a (from main()) +// 1 ']' b +// 2 '^' c // // Notice that ')' is not included, because b would have to have // been called from a different context in rule a for ')' to be @@ -598,11 +571,14 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { // // For error recovery, we cannot consider FOLLOW(c) // (context-sensitive or otherwise). We need the combined set of -// all context-sensitive FOLLOW sets--the set of all tokens that +// all context-sensitive FOLLOW sets - the set of all tokens that // could follow any reference in the call chain. We need to // reSync to one of those tokens. Note that FOLLOW(c)='^' and if // we reSync'd to that token, we'd consume until EOF. We need to -// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. +// Sync to context-sensitive FOLLOWs for a, b, and c: +// +// {']','^'} +// // In this case, for input "[]", LA(1) is ']' and in the set, so we would // not consume anything. 
After printing an error, rule c would // return normally. Rule b would not find the required '^' though. @@ -620,22 +596,19 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { // // ANTLR's error recovery mechanism is based upon original ideas: // -// "Algorithms + Data Structures = Programs" by Niklaus Wirth -// -// and -// -// "A note on error recovery in recursive descent parsers": -// http://portal.acm.org/citation.cfm?id=947902.947905 +// [Algorithms + Data Structures = Programs] by Niklaus Wirth and +// [A note on error recovery in recursive descent parsers]. // -// Later, Josef Grosch had some good ideas: +// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent +// Parsers] // -// "Efficient and Comfortable Error Recovery in Recursive Descent -// Parsers": -// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip +// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead +// during parsing. Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs // -// Like Grosch I implement context-sensitive FOLLOW sets that are combined -// at run-time upon error to avoid overhead during parsing. -func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { +// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905 +// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE +// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip +func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet { atn := recognizer.GetInterpreter().atn ctx := recognizer.GetParserRuleContext() recoverSet := NewIntervalSet() @@ -660,40 +633,36 @@ func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) } } -// -// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors +// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors // by immediately canceling the parse operation with a -// {@link ParseCancellationException}. The implementation ensures that the -// {@link ParserRuleContext//exception} field is set for all parse tree nodes +// [ParseCancellationException]. The implementation ensures that the +// [ParserRuleContext//exception] field is set for all parse tree nodes // that were not completed prior to encountering the error. // -//

-// This error strategy is useful in the following scenarios.
-//
-// Two-stage parsing: This error strategy allows the first
-// stage of two-stage parsing to immediately terminate if an error is
-// encountered, and immediately fall back to the second stage. In addition to
-// avoiding wasted work by attempting to recover from errors here, the empty
-// implementation of {@link BailErrorStrategy//Sync} improves the performance of
-// the first stage.
-// Silent validation: When syntax errors are not being
-// Reported or logged, and the parse result is simply ignored if errors occur,
-// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
-// when the result will be ignored either way.
-//
+// - Two-stage parsing: This error strategy allows the first +// stage of two-stage parsing to immediately terminate if an error is +// encountered, and immediately fall back to the second stage. In addition to +// avoiding wasted work by attempting to recover from errors here, the empty +// implementation of [BailErrorStrategy.Sync] improves the performance of +// the first stage. // -//

-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
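// A sketch of the two-stage pattern described above. NewMyParser, Entry, and HasError are
// placeholders/assumptions standing in for generated-parser and runtime APIs not shown in this
// diff; only SetErrorHandler, NewBailErrorStrategy, and NewDefaultErrorStrategy come from the
// surrounding code:
//
//	p := NewMyParser(tokens)
//	p.SetErrorHandler(antlr.NewBailErrorStrategy()) // stage 1: give up on the first syntax error
//	tree := p.Entry()
//
//	if p.HasError() { // assumed accessor for the error recorded via SetError
//		tokens.Seek(0)
//		p = NewMyParser(tokens)
//		p.SetErrorHandler(antlr.NewDefaultErrorStrategy()) // stage 2: full reporting and recovery
//		tree = p.Entry()
//	}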
+// - Silent validation: When syntax errors are not being +// Reported or logged, and the parse result is simply ignored if errors occur, +// the [BailErrorStrategy] avoids wasting work on recovering from errors +// when the result will be ignored either way. // -// @see Parser//setErrorHandler(ANTLRErrorStrategy) - +// myparser.SetErrorHandler(NewBailErrorStrategy()) +// +// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)] type BailErrorStrategy struct { *DefaultErrorStrategy } var _ ErrorStrategy = &BailErrorStrategy{} +//goland:noinspection GoUnusedExportedFunction func NewBailErrorStrategy() *BailErrorStrategy { b := new(BailErrorStrategy) @@ -703,10 +672,10 @@ func NewBailErrorStrategy() *BailErrorStrategy { return b } -// Instead of recovering from exception {@code e}, re-panic it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule func catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. +// Recover Instead of recovering from exception e, re-panic it wrapped +// in a [ParseCancellationException] so it is not caught by the +// rule func catches. Use Exception.GetCause() to get the +// original [RecognitionException]. func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { context := recognizer.GetParserRuleContext() for context != nil { @@ -717,10 +686,10 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { context = nil } } - panic(NewParseCancellationException()) // TODO we don't emit e properly + recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly } -// Make sure we don't attempt to recover inline if the parser +// RecoverInline makes sure we don't attempt to recover inline if the parser // successfully recovers, it won't panic an exception. func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { b.Recover(recognizer, NewInputMisMatchException(recognizer)) @@ -728,7 +697,6 @@ func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { return nil } -// Make sure we don't attempt to recover from problems in subrules.// -func (b *BailErrorStrategy) Sync(recognizer Parser) { - // pass +// Sync makes sure we don't attempt to recover from problems in sub-rules. +func (b *BailErrorStrategy) Sync(_ Parser) { } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go b/vendor/github.com/antlr4-go/antlr/v4/errors.go similarity index 73% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go rename to vendor/github.com/antlr4-go/antlr/v4/errors.go index 3954c13782..8f0f2f601f 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go +++ b/vendor/github.com/antlr4-go/antlr/v4/errors.go @@ -35,7 +35,7 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In // } else { // stack := NewError().stack // } - // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int + // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int t := new(BaseRecognitionException) @@ -43,15 +43,17 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In t.recognizer = recognizer t.input = input t.ctx = ctx - // The current {@link Token} when an error occurred. Since not all streams + + // The current Token when an error occurred. Since not all streams // support accessing symbols by index, we have to track the {@link Token} // instance itself. 
+ // t.offendingToken = nil + // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't Match. + // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the + // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match. + // t.offendingState = -1 if t.recognizer != nil { t.offendingState = t.recognizer.GetState() @@ -74,15 +76,15 @@ func (b *BaseRecognitionException) GetInputStream() IntStream { //

 // If the state number is not known, b method returns -1.
-// Gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time b exception was panicn. +// getExpectedTokens gets the set of input symbols which could potentially follow the +// previously Matched symbol at the time this exception was raised. // -//

-// If the set of expected tokens is not known and could not be computed,
-// b method returns {@code nil}.
+// If the set of expected tokens is not known and could not be computed, +// this method returns nil. // -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code nil} if the information is not available. -// / +// The func returns the set of token types that could potentially follow the current +// state in the {ATN}, or nil if the information is not available. + func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { if b.recognizer != nil { return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) @@ -99,10 +101,10 @@ type LexerNoViableAltException struct { *BaseRecognitionException startIndex int - deadEndConfigs ATNConfigSet + deadEndConfigs *ATNConfigSet } -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException { l := new(LexerNoViableAltException) @@ -128,14 +130,16 @@ type NoViableAltException struct { startToken Token offendingToken Token ctx ParserRuleContext - deadEndConfigs ATNConfigSet + deadEndConfigs *ATNConfigSet } -// Indicates that the parser could not decide which of two or more paths +// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths // to take based upon the remaining input. It tracks the starting token // of the offending input and also knows where the parser was -// in the various paths when the error. Reported by ReportNoViableAlternative() -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { +// in the various paths when the error. +// +// Reported by [ReportNoViableAlternative] +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { if ctx == nil { ctx = recognizer.GetParserRuleContext() @@ -157,12 +161,14 @@ func NewNoViableAltException(recognizer Parser, input TokenStream, startToken To n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1)?// + // input.LT(1) n.deadEndConfigs = deadEndConfigs + // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) + // not be buffering tokens so get a reference to it. + // + // At the time the error occurred, of course the stream needs to keep a + // buffer of all the tokens, but later we might not have access to those. n.startToken = startToken n.offendingToken = offendingToken @@ -173,7 +179,7 @@ type InputMisMatchException struct { *BaseRecognitionException } -// This signifies any kind of mismatched input exceptions such as +// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as // when the current input does not Match the expected token. 
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { @@ -186,11 +192,10 @@ func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { } -// A semantic predicate failed during validation. Validation of predicates +// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates // occurs when normally parsing the alternative just like Matching a token. // Disambiguating predicate evaluation occurs when we test a predicate during // prediction. - type FailedPredicateException struct { *BaseRecognitionException @@ -199,6 +204,7 @@ type FailedPredicateException struct { predicate string } +//goland:noinspection GoUnusedExportedFunction func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { f := new(FailedPredicateException) @@ -231,6 +237,21 @@ func (f *FailedPredicateException) formatMessage(predicate, message string) stri type ParseCancellationException struct { } +func (p ParseCancellationException) GetOffendingToken() Token { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetMessage() string { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetInputStream() IntStream { + //TODO implement me + panic("implement me") +} + func NewParseCancellationException() *ParseCancellationException { // Error.call(this) // Error.captureStackTrace(this, ParseCancellationException) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go similarity index 52% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go rename to vendor/github.com/antlr4-go/antlr/v4/file_stream.go index bd6ad5efe3..5f65f809be 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go +++ b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go @@ -5,8 +5,7 @@ package antlr import ( - "bytes" - "io" + "bufio" "os" ) @@ -14,34 +13,53 @@ import ( // when you construct the object. type FileStream struct { - *InputStream - + InputStream filename string } +//goland:noinspection GoUnusedExportedFunction func NewFileStream(fileName string) (*FileStream, error) { - buf := bytes.NewBuffer(nil) - f, err := os.Open(fileName) if err != nil { return nil, err } - defer f.Close() - _, err = io.Copy(buf, f) + + defer func(f *os.File) { + errF := f.Close() + if errF != nil { + } + }(f) + + reader := bufio.NewReader(f) + fInfo, err := f.Stat() if err != nil { return nil, err } - fs := new(FileStream) - - fs.filename = fileName - s := string(buf.Bytes()) + fs := &FileStream{ + InputStream: InputStream{ + index: 0, + name: fileName, + }, + filename: fileName, + } - fs.InputStream = NewInputStream(s) + // Pre-build the buffer and read runes efficiently + // + fs.data = make([]rune, 0, fInfo.Size()) + for { + r, _, err := reader.ReadRune() + if err != nil { + break + } + fs.data = append(fs.data, r) + } + fs.size = len(fs.data) // Size in runes + // All done. + // return fs, nil - } func (f *FileStream) GetSourceName() string { diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go new file mode 100644 index 0000000000..ab4e96be52 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go @@ -0,0 +1,157 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "io" +) + +type InputStream struct { + name string + index int + data []rune + size int +} + +// NewIoStream creates a new input stream from the given io.Reader reader. +// Note that the reader is read completely into memory and so it must actually +// have a stopping point - you cannot pass in a reader on an open-ended source such +// as a socket for instance. +func NewIoStream(reader io.Reader) *InputStream { + + rReader := bufio.NewReader(reader) + + is := &InputStream{ + name: "", + index: 0, + } + + // Pre-build the buffer and read runes reasonably efficiently given that + // we don't exactly know how big the input is. + // + is.data = make([]rune, 0, 512) + for { + r, _, err := rReader.ReadRune() + if err != nil { + break + } + is.data = append(is.data, r) + } + is.size = len(is.data) // number of runes + return is +} + +// NewInputStream creates a new input stream from the given string +func NewInputStream(data string) *InputStream { + + is := &InputStream{ + name: "", + index: 0, + data: []rune(data), // This is actually the most efficient way + } + is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too + return is +} + +func (is *InputStream) reset() { + is.index = 0 +} + +// Consume moves the input pointer to the next character in the input stream +func (is *InputStream) Consume() { + if is.index >= is.size { + // assert is.LA(1) == TokenEOF + panic("cannot consume EOF") + } + is.index++ +} + +// LA returns the character at the given offset from the start of the input stream +func (is *InputStream) LA(offset int) int { + + if offset == 0 { + return 0 // nil + } + if offset < 0 { + offset++ // e.g., translate LA(-1) to use offset=0 + } + pos := is.index + offset - 1 + + if pos < 0 || pos >= is.size { // invalid + return TokenEOF + } + + return int(is.data[pos]) +} + +// LT returns the character at the given offset from the start of the input stream +func (is *InputStream) LT(offset int) int { + return is.LA(offset) +} + +// Index returns the current offset in to the input stream +func (is *InputStream) Index() int { + return is.index +} + +// Size returns the total number of characters in the input stream +func (is *InputStream) Size() int { + return is.size +} + +// Mark does nothing here as we have entire buffer +func (is *InputStream) Mark() int { + return -1 +} + +// Release does nothing here as we have entire buffer +func (is *InputStream) Release(_ int) { +} + +// Seek the input point to the provided index offset +func (is *InputStream) Seek(index int) { + if index <= is.index { + is.index = index // just jump don't update stream state (line,...) 
+ return + } + // seek forward + is.index = intMin(index, is.size) +} + +// GetText returns the text from the input stream from the start to the stop index +func (is *InputStream) GetText(start int, stop int) string { + if stop >= is.size { + stop = is.size - 1 + } + if start >= is.size { + return "" + } + + return string(is.data[start : stop+1]) +} + +// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last +// character of the stop token +func (is *InputStream) GetTextFromTokens(start, stop Token) string { + if start != nil && stop != nil { + return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) + } + + return "" +} + +func (is *InputStream) GetTextFromInterval(i Interval) string { + return is.GetText(i.Start, i.Stop) +} + +func (*InputStream) GetSourceName() string { + return "Obtained from string" +} + +// String returns the entire input stream as a string +func (is *InputStream) String() string { + return string(is.data) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go b/vendor/github.com/antlr4-go/antlr/v4/int_stream.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go rename to vendor/github.com/antlr4-go/antlr/v4/int_stream.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go similarity index 82% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go rename to vendor/github.com/antlr4-go/antlr/v4/interval_set.go index c1e155e818..cc5066067a 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go +++ b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go @@ -14,20 +14,21 @@ type Interval struct { Stop int } -/* stop is not included! */ -func NewInterval(start, stop int) *Interval { - i := new(Interval) - - i.Start = start - i.Stop = stop - return i +// NewInterval creates a new interval with the given start and stop values. +func NewInterval(start, stop int) Interval { + return Interval{ + Start: start, + Stop: stop, + } } -func (i *Interval) Contains(item int) bool { +// Contains returns true if the given item is contained within the interval. +func (i Interval) Contains(item int) bool { return item >= i.Start && item < i.Stop } -func (i *Interval) String() string { +// String generates a string representation of the interval. +func (i Interval) String() string { if i.Start == i.Stop-1 { return strconv.Itoa(i.Start) } @@ -35,15 +36,18 @@ func (i *Interval) String() string { return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) } -func (i *Interval) length() int { +// Length returns the length of the interval. +func (i Interval) Length() int { return i.Stop - i.Start } +// IntervalSet represents a collection of [Intervals], which may be read-only. type IntervalSet struct { - intervals []*Interval + intervals []Interval readOnly bool } +// NewIntervalSet creates a new empty, writable, interval set. 
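Interval is now a plain value whose Stop bound is exclusive, which is what the Contains and String methods above assume. A small usage sketch against the vendored package:

package main

import (
    "fmt"

    "github.com/antlr4-go/antlr/v4"
)

func main() {
    // Stop is exclusive, so NewInterval(3, 7) covers 3..6.
    iv := antlr.NewInterval(3, 7)
    fmt.Println(iv.Contains(6)) // true
    fmt.Println(iv.Contains(7)) // false: 7 lies outside the half-open range
    fmt.Println(iv)             // "3..6" via Interval.String()
}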
func NewIntervalSet() *IntervalSet { i := new(IntervalSet) @@ -54,6 +58,20 @@ func NewIntervalSet() *IntervalSet { return i } +func (i *IntervalSet) Equals(other *IntervalSet) bool { + if len(i.intervals) != len(other.intervals) { + return false + } + + for k, v := range i.intervals { + if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop { + return false + } + } + + return true +} + func (i *IntervalSet) first() int { if len(i.intervals) == 0 { return TokenInvalidType @@ -70,16 +88,16 @@ func (i *IntervalSet) addRange(l, h int) { i.addInterval(NewInterval(l, h+1)) } -func (i *IntervalSet) addInterval(v *Interval) { +func (i *IntervalSet) addInterval(v Interval) { if i.intervals == nil { - i.intervals = make([]*Interval, 0) + i.intervals = make([]Interval, 0) i.intervals = append(i.intervals, v) } else { // find insert pos for k, interval := range i.intervals { // distinct range -> insert if v.Stop < interval.Start { - i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...) return } else if v.Stop == interval.Start { i.intervals[k].Start = v.Start @@ -139,16 +157,16 @@ func (i *IntervalSet) contains(item int) bool { } func (i *IntervalSet) length() int { - len := 0 + iLen := 0 for _, v := range i.intervals { - len += v.length() + iLen += v.Length() } - return len + return iLen } -func (i *IntervalSet) removeRange(v *Interval) { +func (i *IntervalSet) removeRange(v Interval) { if v.Start == v.Stop-1 { i.removeOne(v.Start) } else if i.intervals != nil { @@ -162,7 +180,7 @@ func (i *IntervalSet) removeRange(v *Interval) { i.intervals[k] = NewInterval(ni.Start, v.Start) x := NewInterval(v.Stop, ni.Stop) // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) return } else if v.Start <= ni.Start && v.Stop >= ni.Stop { // i.intervals.splice(k, 1) @@ -199,7 +217,7 @@ func (i *IntervalSet) removeOne(v int) { x := NewInterval(ki.Start, v) ki.Start = v + 1 // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) return } } @@ -223,7 +241,7 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin return i.toIndexString() } -func (i *IntervalSet) GetIntervals() []*Interval { +func (i *IntervalSet) GetIntervals() []Interval { return i.intervals } diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go new file mode 100644 index 0000000000..6d668f7983 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go @@ -0,0 +1,684 @@ +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +import ( + "container/list" + "runtime/debug" + "sort" +) + +// Collectable is an interface that a struct should implement if it is to be +// usable as a key in these collections. 
+type Collectable[T any] interface { + Hash() int + Equals(other Collectable[T]) bool +} + +type Comparator[T any] interface { + Hash1(o T) int + Equals2(T, T) bool +} + +type CollectionSource int +type CollectionDescriptor struct { + SybolicName string + Description string +} + +const ( + UnknownCollection CollectionSource = iota + ATNConfigLookupCollection + ATNStateCollection + DFAStateCollection + ATNConfigCollection + PredictionContextCollection + SemanticContextCollection + ClosureBusyCollection + PredictionVisitedCollection + MergeCacheCollection + PredictionContextCacheCollection + AltSetCollection + ReachSetCollection +) + +var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{ + UnknownCollection: { + SybolicName: "UnknownCollection", + Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.", + }, + ATNConfigCollection: { + SybolicName: "ATNConfigCollection", + Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." + + "For instance, it is used to store the results of the closure() operation in the ATN.", + }, + ATNConfigLookupCollection: { + SybolicName: "ATNConfigLookupCollection", + Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." + + "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.", + }, + ATNStateCollection: { + SybolicName: "ATNStateCollection", + Description: "ATNState collection. This is used to store the states of the ATN.", + }, + DFAStateCollection: { + SybolicName: "DFAStateCollection", + Description: "DFAState collection. This is used to store the states of the DFA.", + }, + PredictionContextCollection: { + SybolicName: "PredictionContextCollection", + Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.", + }, + SemanticContextCollection: { + SybolicName: "SemanticContextCollection", + Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.", + }, + ClosureBusyCollection: { + SybolicName: "ClosureBusyCollection", + Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." + + "It stores ATNConfigs that are currently being processed in the closure() operation.", + }, + PredictionVisitedCollection: { + SybolicName: "PredictionVisitedCollection", + Description: "A map that records whether we have visited a particular context when searching through cached entries.", + }, + MergeCacheCollection: { + SybolicName: "MergeCacheCollection", + Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.", + }, + PredictionContextCacheCollection: { + SybolicName: "PredictionContextCacheCollection", + Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.", + }, + AltSetCollection: { + SybolicName: "AltSetCollection", + Description: "Used to eliminate duplicate alternatives in an ATN config set.", + }, + ReachSetCollection: { + SybolicName: "ReachSetCollection", + Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.", + }, +} + +// JStore implements a container that allows the use of a struct to calculate the key +// for a collection of values akin to map. 
This is not meant to be a full-blown HashMap but just +// serve the needs of the ANTLR Go runtime. +// +// For ease of porting the logic of the runtime from the master target (Java), this collection +// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() +// function as the key. The values are stored in a standard go map which internally is a form of hashmap +// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with +// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't +// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and +// we understand the requirements, then this is fine - this is not a general purpose collection. +type JStore[T any, C Comparator[T]] struct { + store map[int][]T + len int + comparator Comparator[T] + stats *JStatRec +} + +func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] { + + if comparator == nil { + panic("comparator cannot be nil") + } + + s := &JStore[T, C]{ + store: make(map[int][]T, 1), + comparator: comparator, + } + if collectStats { + s.stats = &JStatRec{ + Source: cType, + Description: desc, + } + + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + s.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(s.stats) + } + return s +} + +// Put will store given value in the collection. Note that the key for storage is generated from +// the value itself - this is specifically because that is what ANTLR needs - this would not be useful +// as any kind of general collection. +// +// If the key has a hash conflict, then the value will be added to the slice of values associated with the +// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is +// tested by calling the equals() method on the key. +// +// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true +// +// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. +func (s *JStore[T, C]) Put(value T) (v T, exists bool) { + + if collectStats { + s.stats.Puts++ + } + kh := s.comparator.Hash1(value) + + var hClash bool + for _, v1 := range s.store[kh] { + hClash = true + if s.comparator.Equals2(value, v1) { + if collectStats { + s.stats.PutHits++ + s.stats.PutHashConflicts++ + } + return v1, true + } + if collectStats { + s.stats.PutMisses++ + } + } + if collectStats && hClash { + s.stats.PutHashConflicts++ + } + s.store[kh] = append(s.store[kh], value) + + if collectStats { + if len(s.store[kh]) > s.stats.MaxSlotSize { + s.stats.MaxSlotSize = len(s.store[kh]) + } + } + s.len++ + if collectStats { + s.stats.CurSize = s.len + if s.len > s.stats.MaxSize { + s.stats.MaxSize = s.len + } + } + return value, false +} + +// Get will return the value associated with the key - the type of the key is the same type as the value +// which would not generally be useful, but this is a specific thing for ANTLR where the key is +// generated using the object we are going to store. 
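The Put/Get contract described above (hash the value into a slot, then scan that slot with Equals2) can be pictured with a much smaller bucket map. The types and names below are illustrative only, not the runtime's API:

package main

import "fmt"

type point struct{ x, y int }

// pointComparator plays the role of Comparator[T]: it supplies the hash and
// equality used to place and find values.
type pointComparator struct{}

func (pointComparator) Hash1(p point) int       { return p.x*31 + p.y }
func (pointComparator) Equals2(a, b point) bool { return a == b }

type bucketStore struct {
    cmp   pointComparator
    store map[int][]point
}

// put returns the existing value and true when an equal value is already in
// the hash slot; otherwise it appends the value and returns it with false.
func (s *bucketStore) put(v point) (point, bool) {
    h := s.cmp.Hash1(v)
    for _, old := range s.store[h] {
        if s.cmp.Equals2(old, v) {
            return old, true
        }
    }
    s.store[h] = append(s.store[h], v)
    return v, false
}

func main() {
    s := &bucketStore{store: map[int][]point{}}
    fmt.Println(s.put(point{1, 2})) // {1 2} false (inserted)
    fmt.Println(s.put(point{1, 2})) // {1 2} true  (found by Equals2, not re-inserted)
}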
+func (s *JStore[T, C]) Get(key T) (T, bool) { + if collectStats { + s.stats.Gets++ + } + kh := s.comparator.Hash1(key) + var hClash bool + for _, v := range s.store[kh] { + hClash = true + if s.comparator.Equals2(key, v) { + if collectStats { + s.stats.GetHits++ + s.stats.GetHashConflicts++ + } + return v, true + } + if collectStats { + s.stats.GetMisses++ + } + } + if collectStats { + if hClash { + s.stats.GetHashConflicts++ + } + s.stats.GetNoEnt++ + } + return key, false +} + +// Contains returns true if the given key is present in the store +func (s *JStore[T, C]) Contains(key T) bool { + _, present := s.Get(key) + return present +} + +func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { + vs := make([]T, 0, len(s.store)) + for _, v := range s.store { + vs = append(vs, v...) + } + sort.Slice(vs, func(i, j int) bool { + return less(vs[i], vs[j]) + }) + + return vs +} + +func (s *JStore[T, C]) Each(f func(T) bool) { + for _, e := range s.store { + for _, v := range e { + f(v) + } + } +} + +func (s *JStore[T, C]) Len() int { + return s.len +} + +func (s *JStore[T, C]) Values() []T { + vs := make([]T, 0, len(s.store)) + for _, e := range s.store { + vs = append(vs, e...) + } + return vs +} + +type entry[K, V any] struct { + key K + val V +} + +type JMap[K, V any, C Comparator[K]] struct { + store map[int][]*entry[K, V] + len int + comparator Comparator[K] + stats *JStatRec +} + +func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] { + m := &JMap[K, V, C]{ + store: make(map[int][]*entry[K, V], 1), + comparator: comparator, + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) { + if collectStats { + m.stats.Puts++ + } + kh := m.comparator.Hash1(key) + + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.PutHits++ + m.stats.PutHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.PutMisses++ + } + } + if collectStats { + if hClash { + m.stats.PutHashConflicts++ + } + } + m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) + if collectStats { + if len(m.store[kh]) > m.stats.MaxSlotSize { + m.stats.MaxSlotSize = len(m.store[kh]) + } + } + m.len++ + if collectStats { + m.stats.CurSize = m.len + if m.len > m.stats.MaxSize { + m.stats.MaxSize = m.len + } + } + return val, false +} + +func (m *JMap[K, V, C]) Values() []V { + vs := make([]V, 0, len(m.store)) + for _, e := range m.store { + for _, v := range e { + vs = append(vs, v.val) + } + } + return vs +} + +func (m *JMap[K, V, C]) Get(key K) (V, bool) { + if collectStats { + m.stats.Gets++ + } + var none V + kh := m.comparator.Hash1(key) + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.GetHits++ + m.stats.GetHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.GetMisses++ + } + } + if collectStats { + if hClash { + m.stats.GetHashConflicts++ + } + m.stats.GetNoEnt++ + } + return none, false +} + +func (m *JMap[K, V, C]) Len() int { + return m.len +} + +func (m *JMap[K, V, C]) Delete(key K) { + kh := m.comparator.Hash1(key) + for i, e := range m.store[kh] { + if 
m.comparator.Equals2(e.key, key) { + m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) + m.len-- + return + } + } +} + +func (m *JMap[K, V, C]) Clear() { + m.store = make(map[int][]*entry[K, V]) +} + +type JPCMap struct { + store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]] + size int + stats *JStatRec +} + +func NewJPCMap(cType CollectionSource, desc string) *JPCMap { + m := &JPCMap{ + store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + // Do we have a map stored by k1? + // + m2, present := pcm.store.Get(k1) + if present { + if collectStats { + pcm.stats.GetHits++ + } + // We found a map of values corresponding to k1, so now we need to look up k2 in that map + // + return m2.Get(k2) + } + if collectStats { + pcm.stats.GetMisses++ + } + return nil, false +} + +func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) { + + if collectStats { + pcm.stats.Puts++ + } + // First does a map already exist for k1? + // + if m2, present := pcm.store.Get(k1); present { + if collectStats { + pcm.stats.PutHits++ + } + _, present = m2.Put(k2, v) + if !present { + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + } + } else { + // No map found for k1, so we create it, add in our value, then store is + // + if collectStats { + pcm.stats.PutMisses++ + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry") + } else { + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry") + } + + m2.Put(k2, v) + pcm.store.Put(k1, m2) + pcm.size++ + } +} + +type JPCMap2 struct { + store map[int][]JPCEntry + size int + stats *JStatRec +} + +type JPCEntry struct { + k1, k2, v *PredictionContext +} + +func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 { + m := &JPCMap2{ + store: make(map[int][]JPCEntry, 1000), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func dHash(k1, k2 *PredictionContext) int { + return k1.cachedHash*31 + k2.cachedHash +} + +func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.GetHits++ + pcm.stats.GetHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.GetMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.GetHashConflicts++ + } + pcm.stats.GetNoEnt++ + 
} + return nil, false +} + +func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Puts++ + } + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.PutHits++ + pcm.stats.PutHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.PutMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.PutHashConflicts++ + } + } + pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v}) + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + return nil, false +} + +type VisitEntry struct { + k *PredictionContext + v *PredictionContext +} +type VisitRecord struct { + store map[*PredictionContext]*PredictionContext + len int + stats *JStatRec +} + +type VisitList struct { + cache *list.List + lock RWMutex +} + +var visitListPool = VisitList{ + cache: list.New(), + lock: RWMutex{}, +} + +// NewVisitRecord returns a new VisitRecord instance from the pool if available. +// Note that this "map" uses a pointer as a key because we are emulating the behavior of +// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal, +// which means is the key the same reference to an object rather than is it .equals() to another +// object. +func NewVisitRecord() *VisitRecord { + visitListPool.lock.Lock() + el := visitListPool.cache.Front() + defer visitListPool.lock.Unlock() + var vr *VisitRecord + if el == nil { + vr = &VisitRecord{ + store: make(map[*PredictionContext]*PredictionContext), + } + if collectStats { + vr.stats = &JStatRec{ + Source: PredictionContextCacheCollection, + Description: "VisitRecord", + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + vr.stats.CreateStack = debug.Stack() + } + } + } else { + vr = el.Value.(*VisitRecord) + visitListPool.cache.Remove(el) + vr.store = make(map[*PredictionContext]*PredictionContext) + } + if collectStats { + Statistics.AddJStatRec(vr.stats) + } + return vr +} + +func (vr *VisitRecord) Release() { + vr.len = 0 + vr.store = nil + if collectStats { + vr.stats.MaxSize = 0 + vr.stats.CurSize = 0 + vr.stats.Gets = 0 + vr.stats.GetHits = 0 + vr.stats.GetMisses = 0 + vr.stats.GetHashConflicts = 0 + vr.stats.GetNoEnt = 0 + vr.stats.Puts = 0 + vr.stats.PutHits = 0 + vr.stats.PutMisses = 0 + vr.stats.PutHashConflicts = 0 + vr.stats.MaxSlotSize = 0 + } + visitListPool.lock.Lock() + visitListPool.cache.PushBack(vr) + visitListPool.lock.Unlock() +} + +func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Gets++ + } + v := vr.store[k] + if v != nil { + if collectStats { + vr.stats.GetHits++ + } + return v, true + } + if collectStats { + vr.stats.GetNoEnt++ + } + return nil, false +} + +func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Puts++ + } + vr.store[k] = v + vr.len++ + if collectStats { + vr.stats.CurSize = vr.len + if vr.len > vr.stats.MaxSize { + vr.stats.MaxSize = vr.len + } + } + return v, false +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go b/vendor/github.com/antlr4-go/antlr/v4/lexer.go similarity index 78% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer.go index 
6533f05164..e5594b2168 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer.go @@ -69,7 +69,7 @@ func NewBaseLexer(input CharStream) *BaseLexer { // create a single token. NextToken will return l object after // Matching lexer rule(s). If you subclass to allow multiple token // emissions, then set l to the last token to be Matched or - // something nonnil so that the auto token emit mechanism will not + // something non nil so that the auto token emit mechanism will not // emit another token. lexer.token = nil @@ -111,6 +111,7 @@ const ( LexerSkip = -3 ) +//goland:noinspection GoUnusedConst const ( LexerDefaultTokenChannel = TokenDefaultChannel LexerHidden = TokenHiddenChannel @@ -118,7 +119,7 @@ const ( LexerMaxCharValue = 0x10FFFF ) -func (b *BaseLexer) reset() { +func (b *BaseLexer) Reset() { // wack Lexer state variables if b.input != nil { b.input.Seek(0) // rewind the input @@ -176,7 +177,7 @@ func (b *BaseLexer) safeMatch() (ret int) { return b.Interpreter.Match(b.input, b.mode) } -// Return a token from l source i.e., Match a token on the char stream. +// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream. func (b *BaseLexer) NextToken() Token { if b.input == nil { panic("NextToken requires a non-nil input stream.") @@ -205,9 +206,8 @@ func (b *BaseLexer) NextToken() Token { continueOuter := false for { b.thetype = TokenInvalidType - ttype := LexerSkip - ttype = b.safeMatch() + ttype := b.safeMatch() // Defaults to LexerSkip if b.input.LA(1) == TokenEOF { b.hitEOF = true @@ -234,12 +234,11 @@ func (b *BaseLexer) NextToken() Token { } } -// Instruct the lexer to Skip creating a token for current lexer rule -// and look for another token. NextToken() knows to keep looking when -// a lexer rule finishes with token set to SKIPTOKEN. Recall that +// Skip instructs the lexer to Skip creating a token for current lexer rule +// and look for another token. [NextToken] knows to keep looking when +// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that // if token==nil at end of any token rule, it creates one for you // and emits it. -// / func (b *BaseLexer) Skip() { b.thetype = LexerSkip } @@ -248,23 +247,29 @@ func (b *BaseLexer) More() { b.thetype = LexerMore } +// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode +// will be in force. func (b *BaseLexer) SetMode(m int) { b.mode = m } +// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the +// current lexer mode to the supplied mode m. func (b *BaseLexer) PushMode(m int) { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("pushMode " + strconv.Itoa(m)) } b.modeStack.Push(b.mode) b.mode = m } +// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to +// return to. 
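PushMode and PopMode implement a simple save-and-restore discipline over the current lexer mode. A minimal sketch of that discipline with a plain int stack (illustrative only; the real lexer keeps this state on BaseLexer.modeStack):

package main

import "fmt"

type modeStack []int

func (m *modeStack) push(v int) { *m = append(*m, v) }

// pop panics when nothing was saved, matching the "Empty Stack" behaviour
// documented for PopMode above.
func (m *modeStack) pop() int {
    if len(*m) == 0 {
        panic("Empty Stack")
    }
    v := (*m)[len(*m)-1]
    *m = (*m)[:len(*m)-1]
    return v
}

func main() {
    const defaultMode, stringMode = 0, 1

    mode := defaultMode
    var saved modeStack

    saved.push(mode)  // PushMode: remember the mode currently in force...
    mode = stringMode // ...then switch to the new one.

    mode = saved.pop() // PopMode: restore the saved mode.
    fmt.Println(mode)  // 0
}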
func (b *BaseLexer) PopMode() int { if len(b.modeStack) == 0 { panic("Empty Stack") } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) } i, _ := b.modeStack.Pop() @@ -280,7 +285,7 @@ func (b *BaseLexer) inputStream() CharStream { func (b *BaseLexer) SetInputStream(input CharStream) { b.input = nil b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} - b.reset() + b.Reset() b.input = input b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} } @@ -289,20 +294,19 @@ func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { return b.tokenFactorySourcePair } -// By default does not support multiple emits per NextToken invocation -// for efficiency reasons. Subclass and override l method, NextToken, -// and GetToken (to push tokens into a list and pull from that list -// rather than a single variable as l implementation does). -// / +// EmitToken by default does not support multiple emits per [NextToken] invocation +// for efficiency reasons. Subclass and override this func, [NextToken], +// and [GetToken] (to push tokens into a list and pull from that list +// rather than a single variable as this implementation does). func (b *BaseLexer) EmitToken(token Token) { b.token = token } -// The standard method called to automatically emit a token at the +// Emit is the standard method called to automatically emit a token at the // outermost lexical rule. The token object should point into the // char buffer start..stop. If there is a text override in 'text', -// use that to set the token's text. Override l method to emit -// custom Token objects or provide a Newfactory. +// use that to set the token's text. Override this method to emit +// custom [Token] objects or provide a new factory. // / func (b *BaseLexer) Emit() Token { t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) @@ -310,6 +314,7 @@ func (b *BaseLexer) Emit() Token { return t } +// EmitEOF emits an EOF token. By default, this is the last token emitted func (b *BaseLexer) EmitEOF() Token { cpos := b.GetCharPositionInLine() lpos := b.GetLine() @@ -318,6 +323,7 @@ func (b *BaseLexer) EmitEOF() Token { return eof } +// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned. func (b *BaseLexer) GetCharPositionInLine() int { return b.Interpreter.GetCharPositionInLine() } @@ -334,13 +340,12 @@ func (b *BaseLexer) SetType(t int) { b.thetype = t } -// What is the index of the current character of lookahead?/// +// GetCharIndex returns the index of the current character of lookahead func (b *BaseLexer) GetCharIndex() int { return b.input.Index() } -// Return the text Matched so far for the current token or any text override. -// Set the complete text of l token it wipes any previous changes to the text. +// GetText returns the text Matched so far for the current token or any text override. func (b *BaseLexer) GetText() string { if b.text != "" { return b.text @@ -349,17 +354,20 @@ func (b *BaseLexer) GetText() string { return b.Interpreter.GetText(b.input) } +// SetText sets the complete text of this token; it wipes any previous changes to the text. func (b *BaseLexer) SetText(text string) { b.text = text } +// GetATN returns the ATN used by the lexer. 
func (b *BaseLexer) GetATN() *ATN { return b.Interpreter.ATN() } -// Return a list of all Token objects in input char stream. -// Forces load of all tokens. Does not include EOF token. -// / +// GetAllTokens returns a list of all [Token] objects in input char stream. +// Forces a load of all tokens that can be made from the input char stream. +// +// Does not include EOF token. func (b *BaseLexer) GetAllTokens() []Token { vl := b.Virt tokens := make([]Token, 0) @@ -398,11 +406,13 @@ func (b *BaseLexer) getCharErrorDisplay(c rune) string { return "'" + b.getErrorDisplayForChar(c) + "'" } -// Lexers can normally Match any char in it's vocabulary after Matching -// a token, so do the easy thing and just kill a character and hope +// Recover can normally Match any char in its vocabulary after Matching +// a token, so here we do the easy thing and just kill a character and hope // it all works out. You can instead use the rule invocation stack // to do sophisticated error recovery if you are in a fragment rule. -// / +// +// In general, lexers should not need to recover and should have rules that cover any eventuality, such as +// a character that makes no sense to the recognizer. func (b *BaseLexer) Recover(re RecognitionException) { if b.input.LA(1) != TokenEOF { if _, ok := re.(*LexerNoViableAltException); ok { diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go similarity index 78% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer_action.go index 111656c295..eaa7393e06 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go @@ -7,14 +7,29 @@ package antlr import "strconv" const ( - LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action. - LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action. - LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action. - LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action. - LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action. - LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action. - LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action. - LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action. + // LexerActionTypeChannel represents a [LexerChannelAction] action. + LexerActionTypeChannel = 0 + + // LexerActionTypeCustom represents a [LexerCustomAction] action. + LexerActionTypeCustom = 1 + + // LexerActionTypeMode represents a [LexerModeAction] action. + LexerActionTypeMode = 2 + + // LexerActionTypeMore represents a [LexerMoreAction] action. + LexerActionTypeMore = 3 + + // LexerActionTypePopMode represents a [LexerPopModeAction] action. + LexerActionTypePopMode = 4 + + // LexerActionTypePushMode represents a [LexerPushModeAction] action. + LexerActionTypePushMode = 5 + + // LexerActionTypeSkip represents a [LexerSkipAction] action. + LexerActionTypeSkip = 6 + + // LexerActionTypeType represents a [LexerTypeAction] action. 
+ LexerActionTypeType = 7 ) type LexerAction interface { @@ -39,7 +54,7 @@ func NewBaseLexerAction(action int) *BaseLexerAction { return la } -func (b *BaseLexerAction) execute(lexer Lexer) { +func (b *BaseLexerAction) execute(_ Lexer) { panic("Not implemented") } @@ -52,17 +67,19 @@ func (b *BaseLexerAction) getIsPositionDependent() bool { } func (b *BaseLexerAction) Hash() int { - return b.actionType + h := murmurInit(0) + h = murmurUpdate(h, b.actionType) + return murmurFinish(h, 1) } func (b *BaseLexerAction) Equals(other LexerAction) bool { - return b == other + return b.actionType == other.getActionType() } -// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. +// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip]. // -//
The {@code Skip} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.
+// The Skip command does not have any parameters, so this action is +// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE]. type LexerSkipAction struct { *BaseLexerAction } @@ -73,17 +90,22 @@ func NewLexerSkipAction() *LexerSkipAction { return la } -// Provides a singleton instance of l parameterless lexer action. +// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action. var LexerSkipActionINSTANCE = NewLexerSkipAction() func (l *LexerSkipAction) execute(lexer Lexer) { lexer.Skip() } +// String returns a string representation of the current [LexerSkipAction]. func (l *LexerSkipAction) String() string { return "skip" } +func (b *LexerSkipAction) Equals(other LexerAction) bool { + return other.getActionType() == LexerActionTypeSkip +} + // Implements the {@code type} lexer action by calling {@link Lexer//setType} // // with the assigned type. @@ -125,11 +147,10 @@ func (l *LexerTypeAction) String() string { return "actionType(" + strconv.Itoa(l.thetype) + ")" } -// Implements the {@code pushMode} lexer action by calling -// {@link Lexer//pushMode} with the assigned mode. +// LexerPushModeAction implements the pushMode lexer action by calling +// [Lexer.pushMode] with the assigned mode. type LexerPushModeAction struct { *BaseLexerAction - mode int } @@ -169,10 +190,10 @@ func (l *LexerPushModeAction) String() string { return "pushMode(" + strconv.Itoa(l.mode) + ")" } -// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. +// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode]. // -//
The {@code popMode} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.
+// The popMode command does not have any parameters, so this action is +// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE] type LexerPopModeAction struct { *BaseLexerAction } @@ -224,11 +245,10 @@ func (l *LexerMoreAction) String() string { return "more" } -// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with +// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with // the assigned mode. type LexerModeAction struct { *BaseLexerAction - mode int } @@ -322,16 +342,19 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool { } } -// Implements the {@code channel} lexer action by calling -// {@link Lexer//setChannel} with the assigned channel. -// Constructs a New{@code channel} action with the specified channel value. -// @param channel The channel value to pass to {@link Lexer//setChannel}. +// LexerChannelAction implements the channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. type LexerChannelAction struct { *BaseLexerAction - channel int } +// NewLexerChannelAction creates a channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. func NewLexerChannelAction(channel int) *LexerChannelAction { l := new(LexerChannelAction) l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) @@ -375,25 +398,22 @@ func (l *LexerChannelAction) String() string { // lexer actions, see {@link LexerActionExecutor//append} and // {@link LexerActionExecutor//fixOffsetBeforeMatch}.
-// Constructs a Newindexed custom action by associating a character offset -// with a {@link LexerAction}. -// -//
Note: This class is only required for lexer actions for which -// {@link LexerAction//isPositionDependent} returns {@code true}.
-// -// @param offset The offset into the input {@link CharStream}, relative to -// the token start index, at which the specified lexer action should be -// executed. -// @param action The lexer action to execute at a particular offset in the -// input {@link CharStream}. type LexerIndexedCustomAction struct { *BaseLexerAction - offset int lexerAction LexerAction isPositionDependent bool } +// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset +// with a [LexerAction]. +// +// Note: This class is only required for lexer actions for which +// [LexerAction.isPositionDependent] returns true. +// +// The offset points into the input [CharStream], relative to +// the token start index, at which the specified lexerAction should be +// executed. func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { l := new(LexerIndexedCustomAction) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go similarity index 70% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go index be1ba7a7e3..dfc28c32b3 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go @@ -29,28 +29,20 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { l.lexerActions = lexerActions // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link LexerATNConfig//hashCode} operation. - l.cachedHash = murmurInit(57) + // of the performance-critical {@link ATNConfig//hashCode} operation. + l.cachedHash = murmurInit(0) for _, a := range lexerActions { l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) } + l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions)) return l } -// Creates a {@link LexerActionExecutor} which executes the actions for -// the input {@code lexerActionExecutor} followed by a specified -// {@code lexerAction}. -// -// @param lexerActionExecutor The executor for actions already traversed by -// the lexer while Matching a token within a particular -// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as -// though it were an empty executor. -// @param lexerAction The lexer action to execute after the actions -// specified in {@code lexerActionExecutor}. -// -// @return A {@link LexerActionExecutor} for executing the combine actions -// of {@code lexerActionExecutor} and {@code lexerAction}. +// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for +// the input [LexerActionExecutor] followed by a specified +// [LexerAction]. +// TODO: This does not match the Java code func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { if lexerActionExecutor == nil { return NewLexerActionExecutor([]LexerAction{lexerAction}) @@ -59,47 +51,42 @@ func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAc return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) } -// Creates a {@link LexerActionExecutor} which encodes the current offset +// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset // for position-dependent lexer actions. // -//
Normally, when the executor encounters lexer actions where -// {@link LexerAction//isPositionDependent} returns {@code true}, it calls -// {@link IntStream//seek} on the input {@link CharStream} to set the input -// position to the end of the current token. This behavior provides -// for efficient DFA representation of lexer actions which appear at the end +// Normally, when the executor encounters lexer actions where +// [LexerAction.isPositionDependent] returns true, it calls +// [IntStream.Seek] on the input [CharStream] to set the input +// position to the end of the current token. This behavior provides +// for efficient [DFA] representation of lexer actions which appear at the end // of a lexer rule, even when the lexer rule Matches a variable number of -// characters.
+// characters. // -//
Prior to traversing a Match transition in the ATN, the current offset +// Prior to traversing a Match transition in the [ATN], the current offset // from the token start index is assigned to all position-dependent lexer // actions which have not already been assigned a fixed offset. By storing -// the offsets relative to the token start index, the DFA representation of +// the offsets relative to the token start index, the [DFA] representation of // lexer actions which appear in the middle of tokens remains efficient due -// to sharing among tokens of the same length, regardless of their absolute -// position in the input stream.
+// to sharing among tokens of the same Length, regardless of their absolute +// position in the input stream. // -//
If the current executor already has offsets assigned to all -// position-dependent lexer actions, the method returns {@code this}.
+// If the current executor already has offsets assigned to all +// position-dependent lexer actions, the method returns this instance. // -// @param offset The current offset to assign to all position-dependent +// The offset is assigned to all position-dependent // lexer actions which do not already have offsets assigned. // -// @return A {@link LexerActionExecutor} which stores input stream offsets +// The func returns a [LexerActionExecutor] that stores input stream offsets // for all position-dependent lexer actions. -// / func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { var updatedLexerActions []LexerAction for i := 0; i < len(l.lexerActions); i++ { _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) if l.lexerActions[i].getIsPositionDependent() && !ok { if updatedLexerActions == nil { - updatedLexerActions = make([]LexerAction, 0) - - for _, a := range l.lexerActions { - updatedLexerActions = append(updatedLexerActions, a) - } + updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions)) + updatedLexerActions = append(updatedLexerActions, l.lexerActions...) } - updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) } } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go similarity index 80% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go index c573b75210..fe938b0259 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go @@ -10,10 +10,8 @@ import ( "strings" ) +//goland:noinspection GoUnusedGlobalVariable var ( - LexerATNSimulatorDebug = false - LexerATNSimulatorDFADebug = false - LexerATNSimulatorMinDFAEdge = 0 LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN @@ -32,11 +30,11 @@ type ILexerATNSimulator interface { } type LexerATNSimulator struct { - *BaseATNSimulator + BaseATNSimulator recog Lexer predictionMode int - mergeCache DoubleDict + mergeCache *JPCMap2 startIndex int Line int CharPositionInLine int @@ -46,27 +44,35 @@ type LexerATNSimulator struct { } func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { - l := new(LexerATNSimulator) - - l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + l := &LexerATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } l.decisionToDFA = decisionToDFA l.recog = recog + // The current token's starting index into the character stream. // Shared across DFA to ATN simulation in case the ATN fails and the // DFA did not have a previous accept state. In l case, we use the // ATN-generated exception object. 
l.startIndex = -1 - // line number 1..n within the input/// + + // line number 1..n within the input l.Line = 1 + // The index of the character relative to the beginning of the line - // 0..n-1/// + // 0..n-1 l.CharPositionInLine = 0 + l.mode = LexerDefaultMode + // Used during DFA/ATN exec to record the most recent accept configuration // info l.prevAccept = NewSimState() - // done + return l } @@ -114,7 +120,7 @@ func (l *LexerATNSimulator) reset() { func (l *LexerATNSimulator) MatchATN(input CharStream) int { startState := l.atn.modeToStartState[l.mode] - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) } oldMode := l.mode @@ -126,7 +132,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { predict := l.execATN(input, next) - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) } return predict @@ -134,18 +140,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("start state closure=" + ds0.configs.String()) } if ds0.isAcceptState { - // allow zero-length tokens + // allow zero-Length tokens l.captureSimState(l.prevAccept, input, ds0) } t := input.LA(1) s := ds0 // s is current/from DFA state for { // while more work - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("execATN loop starting closure: " + s.configs.String()) } @@ -188,7 +194,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { } } t = input.LA(1) - s = target // flip current DFA target becomes Newsrc/from state + s = target // flip current DFA target becomes new src/from state } return l.failOrAccept(l.prevAccept, input, s.configs, t) @@ -214,43 +220,39 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState return nil } target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge) - if LexerATNSimulatorDebug && target != nil { + if runtimeConfig.lexerATNSimulatorDebug && target != nil { fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) } return target } -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. +// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the +// computed state and corresponding edge to the [DFA]. // -// @param input The input stream -// @param s The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, l method -// returns {@link //ERROR}. +// The func returns the computed target [DFA] state for the given input symbol t. +// If this does not lead to a valid [DFA] state, this method +// returns ATNSimulatorError. 
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { reach := NewOrderedATNConfigSet() // if we don't find an existing DFA state // Fill reach starting from closure, following t transitions - l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) + l.getReachableConfigSet(input, s.configs, reach, t) if len(reach.configs) == 0 { // we got nowhere on t from s if !reach.hasSemanticContext { // we got nowhere on t, don't panic out l knowledge it'd - // cause a failover from DFA later. + // cause a fail-over from DFA later. l.addDFAEdge(s, t, ATNSimulatorError, nil) } // stop when we can't Match any more char return ATNSimulatorError } // Add an edge from s to target DFA found/created for reach - return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) + return l.addDFAEdge(s, t, nil, reach) } -func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int { if l.prevAccept.dfaState != nil { lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) @@ -265,34 +267,35 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) } -// Given a starting configuration set, figure out all ATN configurations -// we can reach upon input {@code t}. Parameter {@code reach} is a return -// parameter. -func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { +// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations +// we can reach upon input t. +// +// Parameter reach is a return parameter. 
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) { // l is used to Skip processing for configs which have a lower priority - // than a config that already reached an accept state for the same rule + // than a runtimeConfig that already reached an accept state for the same rule SkipAlt := ATNInvalidAltNumber - for _, cfg := range closure.GetItems() { - currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) - if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { + for _, cfg := range closure.configs { + currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt + if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision { continue } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { - fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) } for _, trans := range cfg.GetState().GetTransitions() { target := l.getReachableTarget(trans, t) if target != nil { - lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor + lexerActionExecutor := cfg.lexerActionExecutor if lexerActionExecutor != nil { lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) } - treatEOFAsEpsilon := (t == TokenEOF) - config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) + treatEOFAsEpsilon := t == TokenEOF + config := NewLexerATNConfig3(cfg, target, lexerActionExecutor) if l.closure(input, config, reach, currentAltReachedAcceptState, true, treatEOFAsEpsilon) { // any remaining configs for l alt have a lower priority @@ -305,7 +308,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNC } func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Printf("ACTION %v\n", lexerActionExecutor) } // seek to after last char in token @@ -325,7 +328,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState return nil } -func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet { configs := NewOrderedATNConfigSet() for i := 0; i < len(p.GetTransitions()); i++ { target := p.GetTransitions()[i].getTarget() @@ -336,25 +339,24 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *Ord return configs } -// Since the alternatives within any lexer decision are ordered by -// preference, l method stops pursuing the closure as soon as an accept +// closure since the alternatives within any lexer decision are ordered by +// preference, this method stops pursuing the closure as soon as an accept // state is reached. After the first accept state is reached by depth-first -// search from {@code config}, all other (potentially reachable) states for -// l rule would have a lower priority. +// search from runtimeConfig, all other (potentially reachable) states for +// this rule would have a lower priority. // -// @return {@code true} if an accept state is reached, otherwise -// {@code false}. -func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, +// The func returns true if an accept state is reached. 
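The closure routine documented above is, at heart, a depth-first walk that reports success as soon as an accept state is reached, so lower-priority alternatives can be dropped. A heavily simplified sketch of that early-stop walk on a toy graph (illustrative types, none of the real ATN machinery):

package main

import "fmt"

type node struct {
    accept bool
    next   []*node
}

// reachesAccept walks depth-first and stops at the first accepting node,
// returning true so the caller can stop exploring lower-priority paths.
func reachesAccept(n *node, seen map[*node]bool) bool {
    if seen[n] {
        return false
    }
    seen[n] = true
    if n.accept {
        return true
    }
    for _, nxt := range n.next {
        if reachesAccept(nxt, seen) {
            return true
        }
    }
    return false
}

func main() {
    accept := &node{accept: true}
    mid := &node{next: []*node{accept}}
    start := &node{next: []*node{mid, {}}} // second branch is never visited once accept is found
    fmt.Println(reachesAccept(start, map[*node]bool{})) // true
}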
+func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { - if LexerATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") } _, ok := config.state.(*RuleStopState) if ok { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { if l.recog != nil { fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) } else { @@ -401,10 +403,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, co } // side-effect: can alter configs.hasSemanticContext -func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, - configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { +func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition, + configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig { - var cfg *LexerATNConfig + var cfg *ATNConfig if trans.getSerializationType() == TransitionRULE { @@ -435,10 +437,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC pt := trans.(*PredicateTransition) - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) } - configs.SetHasSemanticContext(true) + configs.hasSemanticContext = true if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { cfg = NewLexerATNConfig4(config, trans.getTarget()) } @@ -449,7 +451,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC // TODO: if the entry rule is invoked recursively, some // actions may be executed during the recursive call. The // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In l case, the config needs to be + // isEmpty() is false. In this case, the config needs to be // split into two contexts - one with just the empty path // and another with everything but the empty path. // Unfortunately, the current algorithm does not allow @@ -476,26 +478,18 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC return cfg } -// Evaluate a predicate specified in the lexer. +// evaluatePredicate eEvaluates a predicate specified in the lexer. // -//

-// If {@code speculative} is {@code true}, l method was called before
-// {@link //consume} for the Matched character. This method should call
-// {@link //consume} before evaluating the predicate to ensure position
-// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
-// and {@link Lexer//getcolumn}, properly reflect the current
-// lexer state. This method should restore {@code input} and the simulator
-// to the original state before returning (i.e. undo the actions made by the
-// call to {@link //consume}.
+// If speculative is true, this method was called before +// [consume] for the Matched character. This method should call +// [consume] before evaluating the predicate to ensure position +// sensitive values, including [GetText], [GetLine], +// and [GetColumn], properly reflect the current +// lexer state. This method should restore input and the simulator +// to the original state before returning, i.e. undo the actions made by the +// call to [Consume]. // -// @param input The input stream. -// @param ruleIndex The rule containing the predicate. -// @param predIndex The index of the predicate within the rule. -// @param speculative {@code true} if the current index in {@code input} is -// one character before the predicate's location. -// -// @return {@code true} if the specified predicate evaluates to -// {@code true}. -// / +// The func returns true if the specified predicate evaluates to true. func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { // assume true if no recognizer was provided if l.recog == nil { @@ -527,7 +521,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream settings.dfaState = dfaState } -func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState { if to == nil && cfgs != nil { // leading to l call, ATNConfigSet.hasSemanticContext is used as a // marker indicating dynamic predicate evaluation makes l edge @@ -539,10 +533,9 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // TJP notes: next time through the DFA, we see a pred again and eval. // If that gets us to a previously created (but dangling) DFA // state, we can continue in pure DFA mode from there. - // / - suppressEdge := cfgs.HasSemanticContext() - cfgs.SetHasSemanticContext(false) - + // + suppressEdge := cfgs.hasSemanticContext + cfgs.hasSemanticContext = false to = l.addDFAState(cfgs, true) if suppressEdge { @@ -554,7 +547,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // Only track edges within the DFA bounds return to } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) } l.atn.edgeMu.Lock() @@ -572,13 +565,12 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // configurations already. This method also detects the first // configuration containing an ATN rule stop state. Later, when // traversing the DFA, we will know which rule to accept. 
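The addDFAState comment above describes adding a DFA state only when no state with the same set of configurations is already present, and reusing the existing instance otherwise. A minimal sketch of that interning pattern with simplified stand-in types (the key field stands in for a hash of the configuration set):

package main

import "fmt"

type dfaState struct {
	number int
	key    string // stands in for a hash of the ATN configuration set
}

type stateSet struct {
	byKey map[string]*dfaState
}

// intern returns the existing state with the same key, or stores proposed.
func (s *stateSet) intern(proposed *dfaState) *dfaState {
	if existing, ok := s.byKey[proposed.key]; ok {
		return existing
	}
	proposed.number = len(s.byKey)
	s.byKey[proposed.key] = proposed
	return proposed
}

func main() {
	set := &stateSet{byKey: map[string]*dfaState{}}
	a := set.intern(&dfaState{key: "configs-123"})
	b := set.intern(&dfaState{key: "configs-123"})
	fmt.Println(a == b, a.number) // true 0: the second proposal reuses the first state
}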
-func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState { +func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState { proposed := NewDFAState(-1, configs) - var firstConfigWithRuleStopState ATNConfig - - for _, cfg := range configs.GetItems() { + var firstConfigWithRuleStopState *ATNConfig + for _, cfg := range configs.configs { _, ok := cfg.GetState().(*RuleStopState) if ok { @@ -588,14 +580,14 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) } if firstConfigWithRuleStopState != nil { proposed.isAcceptState = true - proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor + proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) } dfa := l.decisionToDFA[l.mode] l.atn.stateMu.Lock() defer l.atn.stateMu.Unlock() - existing, present := dfa.states.Get(proposed) + existing, present := dfa.Get(proposed) if present { // This state was already present, so just return it. @@ -605,10 +597,11 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) // We need to add the new state // - proposed.stateNumber = dfa.states.Len() - configs.SetReadOnly(true) + proposed.stateNumber = dfa.Len() + configs.readOnly = true + configs.configLookup = nil // Not needed now proposed.configs = configs - dfa.states.Put(proposed) + dfa.Put(proposed) } if !suppressEdge { dfa.setS0(proposed) @@ -620,7 +613,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA { return l.decisionToDFA[mode] } -// Get the text Matched so far for the current token. +// GetText returns the text [Match]ed so far for the current token. func (l *LexerATNSimulator) GetText(input CharStream) string { // index is first lookahead char, don't include. return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go similarity index 72% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go rename to vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go index 76689615a6..dfdff000bc 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go @@ -14,11 +14,11 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer { return la } -// - Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. 
-// -// / const ( + // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit + // a predicate during analysis if + // + // seeThruPreds==false LL1AnalyzerHitPred = TokenInvalidType ) @@ -38,11 +38,13 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { count := len(s.GetTransitions()) look := make([]*IntervalSet, count) for alt := 0; alt < count; alt++ { + look[alt] = NewIntervalSet() - lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - seeThruPreds := false // fail to get lookahead upon pred - la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) - // Wipe out lookahead for la alternative if we found nothing + // TODO: This is one of the reasons that ATNConfigs are allocated and freed all the time - fix this tomorrow jim! + lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy") + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false) + + // Wipe out lookahead for la alternative if we found nothing, // or we had a predicate when we !seeThruPreds if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { look[alt] = nil @@ -51,32 +53,31 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { return look } -// * -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. +// Look computes the set of tokens that can follow s in the [ATN] in the +// specified ctx. // -//

-// If {@code ctx} is {@code nil} and the end of the rule containing
-// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
-// If {@code ctx} is not {@code nil} and the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
+// If ctx is nil and the end of the rule containing +// s is reached, [EPSILON] is added to the result set. // -// @param s the ATN state -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx the complete parser context, or {@code nil} if the context +// If ctx is not nil and the end of the outermost rule is +// reached, [EOF] is added to the result set. +// +// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a +// [BlockEndState] to detect epsilon paths through a closure. +// +// Parameter ctx is the complete parser context, or nil if the context // should be ignored // -// @return The set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// / +// The func returns the set of tokens that can follow s in the [ATN] in the +// specified ctx. func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { r := NewIntervalSet() - seeThruPreds := true // ignore preds get all lookahead - var lookContext PredictionContext + var lookContext *PredictionContext if ctx != nil { lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) } - la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true) + la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"), + NewBitSet(), true, true) return r } @@ -110,16 +111,17 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet // outermost context is reached. This parameter has no effect if {@code ctx} // is {@code nil}. -func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { +func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { returnState := la.atn.states[ctx.getReturnState(i)] la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) } -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { - c := NewBaseATNConfig6(s, 0, ctx) + c := NewATNConfig6(s, 0, ctx) if lookBusy.Contains(c) { return @@ -151,7 +153,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look return } - if ctx != BasePredictionContextEMPTY { + if ctx.pcType != PredictionContextEmpty { removed := calledRuleStack.contains(s.GetRuleIndex()) defer func() { if removed { @@ -202,7 +204,8 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look } } -func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { +func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy 
*JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) diff --git a/vendor/github.com/antlr4-go/antlr/v4/mutex.go b/vendor/github.com/antlr4-go/antlr/v4/mutex.go new file mode 100644 index 0000000000..2b0cda4745 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/mutex.go @@ -0,0 +1,41 @@ +//go:build !antlr.nomutex +// +build !antlr.nomutex + +package antlr + +import "sync" + +// Mutex is a simple mutex implementation which just delegates to sync.Mutex, it +// is used to provide a mutex implementation for the antlr package, which users +// can turn off with the build tag -tags antlr.nomutex +type Mutex struct { + mu sync.Mutex +} + +func (m *Mutex) Lock() { + m.mu.Lock() +} + +func (m *Mutex) Unlock() { + m.mu.Unlock() +} + +type RWMutex struct { + mu sync.RWMutex +} + +func (m *RWMutex) Lock() { + m.mu.Lock() +} + +func (m *RWMutex) Unlock() { + m.mu.Unlock() +} + +func (m *RWMutex) RLock() { + m.mu.RLock() +} + +func (m *RWMutex) RUnlock() { + m.mu.RUnlock() +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go b/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go new file mode 100644 index 0000000000..35ce4353ee --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go @@ -0,0 +1,32 @@ +//go:build antlr.nomutex +// +build antlr.nomutex + +package antlr + +type Mutex struct{} + +func (m *Mutex) Lock() { + // No-op +} + +func (m *Mutex) Unlock() { + // No-op +} + +type RWMutex struct{} + +func (m *RWMutex) Lock() { + // No-op +} + +func (m *RWMutex) Unlock() { + // No-op +} + +func (m *RWMutex) RLock() { + // No-op +} + +func (m *RWMutex) RUnlock() { + // No-op +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go new file mode 100644 index 0000000000..923c7b52c4 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go @@ -0,0 +1,47 @@ +//go:build !antlr.stats + +package antlr + +// This file is compiled when the build configuration antlr.stats is not enabled. +// which then allows the compiler to optimize out all the code that is not used. +const collectStats = false + +// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled. 
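The mutex.go / mutex_nomutex.go pair added above selects between real sync-based locks and no-op locks at compile time via the antlr.nomutex build tag (building a consumer with go build -tags antlr.nomutex picks the no-op variant). A minimal sketch of the kind of shared state such a lock guards; lockedCounter is illustrative and not part of the vendored package:

package main

import (
	"fmt"
	"sync"
)

// lockedCounter mirrors how the runtime guards shared DFA state behind a
// mutex whose implementation can be compiled down to a no-op.
type lockedCounter struct {
	mu sync.Mutex // the nomutex build would swap in a do-nothing lock here
	n  int
}

func (c *lockedCounter) inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}

func main() {
	c := &lockedCounter{}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.inc()
		}()
	}
	wg.Wait()
	fmt.Println(c.n) // always 8 when the real mutex is compiled in
}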
+type goRunStats struct { +} + +var Statistics = &goRunStats{} + +func (s *goRunStats) AddJStatRec(_ *JStatRec) { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) CollectionAnomalies() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Reset() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Report(dir string, prefix string) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func (s *goRunStats) Analyze() { + // Do nothing - compiler will optimize this out (hopefully) +} + +type statsOption func(*goRunStats) error + +func (s *goRunStats) Configure(options ...statsOption) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + return nil + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go b/vendor/github.com/antlr4-go/antlr/v4/parser.go similarity index 80% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go rename to vendor/github.com/antlr4-go/antlr/v4/parser.go index d26bf06392..fb57ac15db 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go +++ b/vendor/github.com/antlr4-go/antlr/v4/parser.go @@ -48,8 +48,10 @@ type BaseParser struct { _SyntaxErrors int } -// p.is all the parsing support code essentially most of it is error -// recovery stuff.// +// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error +// recovery stuff. +// +//goland:noinspection GoUnusedExportedFunction func NewBaseParser(input TokenStream) *BaseParser { p := new(BaseParser) @@ -58,39 +60,46 @@ func NewBaseParser(input TokenStream) *BaseParser { // The input stream. p.input = nil + // The error handling strategy for the parser. The default value is a new // instance of {@link DefaultErrorStrategy}. p.errHandler = NewDefaultErrorStrategy() p.precedenceStack = make([]int, 0) p.precedenceStack.Push(0) - // The {@link ParserRuleContext} object for the currently executing rule. + + // The ParserRuleContext object for the currently executing rule. // p.is always non-nil during the parsing process. p.ctx = nil - // Specifies whether or not the parser should construct a parse tree during + + // Specifies whether the parser should construct a parse tree during // the parsing process. The default value is {@code true}. p.BuildParseTrees = true - // When {@link //setTrace}{@code (true)} is called, a reference to the - // {@link TraceListener} is stored here so it can be easily removed in a - // later call to {@link //setTrace}{@code (false)}. The listener itself is + + // When setTrace(true) is called, a reference to the + // TraceListener is stored here, so it can be easily removed in a + // later call to setTrace(false). The listener itself is // implemented as a parser listener so p.field is not directly used by // other parser methods. p.tracer = nil - // The list of {@link ParseTreeListener} listeners registered to receive + + // The list of ParseTreeListener listeners registered to receive // events during the parse. p.parseListeners = nil + // The number of syntax errors Reported during parsing. p.value is - // incremented each time {@link //NotifyErrorListeners} is called. + // incremented each time NotifyErrorListeners is called. 
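The statsOption / Configure / WithTopN trio in nostatistics.go above keeps the same call surface as the stats-enabled build while doing nothing. It follows Go's functional-options pattern; a self-contained sketch of that pattern with illustrative names only:

package main

import "fmt"

type reporter struct {
	topN int
	dir  string
}

type option func(*reporter) error

func withTopN(n int) option {
	return func(r *reporter) error {
		r.topN = n
		return nil
	}
}

func withDir(dir string) option {
	return func(r *reporter) error {
		r.dir = dir
		return nil
	}
}

// configure applies each option in order, stopping at the first error.
func (r *reporter) configure(opts ...option) error {
	for _, o := range opts {
		if err := o(r); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	r := &reporter{}
	_ = r.configure(withTopN(5), withDir("/tmp/stats"))
	fmt.Println(r.topN, r.dir)
}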
p._SyntaxErrors = 0 p.SetInputStream(input) return p } -// p.field maps from the serialized ATN string to the deserialized {@link -// ATN} with +// This field maps from the serialized ATN string to the deserialized [ATN] with // bypass alternatives. // -// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() +// [ATNDeserializationOptions.isGenerateRuleBypassTransitions] +// +//goland:noinspection GoUnusedGlobalVariable var bypassAltsAtnCache = make(map[string]int) // reset the parser's state// @@ -143,10 +152,13 @@ func (p *BaseParser) Match(ttype int) Token { p.Consume() } else { t = p.errHandler.RecoverInline(p) + if p.HasError() { + return nil + } if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol + + // we must have conjured up a new token during single token + // insertion if it's not the current symbol p.ctx.AddErrorNode(t) } } @@ -178,9 +190,8 @@ func (p *BaseParser) MatchWildcard() Token { } else { t = p.errHandler.RecoverInline(p) if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol + // we must have conjured up a new token during single token + // insertion if it's not the current symbol p.ctx.AddErrorNode(t) } } @@ -202,33 +213,27 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener { return p.parseListeners } -// Registers {@code listener} to receive events during the parsing process. +// AddParseListener registers listener to receive events during the parsing process. // -//

-// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
 // optimized code generation), calls to listener methods during the parse
 // may differ substantially from calls made by
-// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
+// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
 // particular, rule entry and exit events may occur in a different order
 // during the parse than after the parser. In addition, calls to certain
-// rule entry methods may be omitted.
-//
-// With the following specific exceptions, calls to listener events are
-// deterministic, i.e. for identical input the calls to listener
-// methods will be the same.
-//
-//   • Alterations to the grammar used to generate code may change the
-//     behavior of the listener calls.
-//   • Alterations to the command line options passed to ANTLR 4 when
-//     generating the parser may change the behavior of the listener calls.
-//   • Changing the version of the ANTLR Tool used to generate the parser
-//     may change the behavior of the listener calls.
+// rule entry methods may be omitted. // -// @param listener the listener to add +// With the following specific exceptions, calls to listener events are +// deterministic, i.e. for identical input the calls to listener +// methods will be the same. // -// @panics nilPointerException if {@code} listener is {@code nil} +// - Alterations to the grammar used to generate code may change the +// behavior of the listener calls. +// - Alterations to the command line options passed to ANTLR 4 when +// generating the parser may change the behavior of the listener calls. +// - Changing the version of the ANTLR Tool used to generate the parser +// may change the behavior of the listener calls. func (p *BaseParser) AddParseListener(listener ParseTreeListener) { if listener == nil { panic("listener") @@ -239,11 +244,10 @@ func (p *BaseParser) AddParseListener(listener ParseTreeListener) { p.parseListeners = append(p.parseListeners, listener) } -// Remove {@code listener} from the list of parse listeners. +// RemoveParseListener removes listener from the list of parse listeners. // -//

-// If {@code listener} is {@code nil} or has not been added as a parse
-// listener, p.method does nothing.
-// @param listener the listener to remove +// If listener is nil or has not been added as a parse +// listener, this func does nothing. func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { if p.parseListeners != nil { @@ -274,7 +278,7 @@ func (p *BaseParser) removeParseListeners() { p.parseListeners = nil } -// Notify any parse listeners of an enter rule event. +// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event. func (p *BaseParser) TriggerEnterRuleEvent() { if p.parseListeners != nil { ctx := p.ctx @@ -285,9 +289,7 @@ func (p *BaseParser) TriggerEnterRuleEvent() { } } -// Notify any parse listeners of an exit rule event. -// -// @see //addParseListener +// TriggerExitRuleEvent notifies any parse listeners of an exit rule event. func (p *BaseParser) TriggerExitRuleEvent() { if p.parseListeners != nil { // reverse order walk of listeners @@ -314,19 +316,16 @@ func (p *BaseParser) GetTokenFactory() TokenFactory { return p.input.GetTokenSource().GetTokenFactory() } -// Tell our token source and error strategy about a Newway to create tokens.// +// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens. func (p *BaseParser) setTokenFactory(factory TokenFactory) { p.input.GetTokenSource().setTokenFactory(factory) } -// The ATN with bypass alternatives is expensive to create so we create it +// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it // lazily. -// -// @panics UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. func (p *BaseParser) GetATNWithBypassAlts() { - // TODO + // TODO - Implement this? panic("Not implemented!") // serializedAtn := p.getSerializedATN() @@ -354,6 +353,7 @@ func (p *BaseParser) GetATNWithBypassAlts() { // String id = m.Get("ID") // +//goland:noinspection GoUnusedParameter func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { panic("NewParseTreePatternMatcher not implemented!") @@ -386,14 +386,16 @@ func (p *BaseParser) GetTokenStream() TokenStream { return p.input } -// Set the token stream and reset the parser.// +// SetTokenStream installs input as the token stream and resets the parser. func (p *BaseParser) SetTokenStream(input TokenStream) { p.input = nil p.reset() p.input = input } -// Match needs to return the current input symbol, which gets put +// GetCurrentToken returns the current token at LT(1). +// +// [Match] needs to return the current input symbol, which gets put // into the label for the associated token ref e.g., x=ID. 
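TriggerEnterRuleEvent and TriggerExitRuleEvent above deliver enter events to parse listeners in registration order and exit events in reverse order. A small stand-alone sketch of that delivery order, with illustrative listener types rather than the vendored ones:

package main

import "fmt"

type listener interface {
	enter(rule string)
	exit(rule string)
}

type printer struct{ name string }

func (p printer) enter(rule string) { fmt.Println(p.name, "enter", rule) }
func (p printer) exit(rule string)  { fmt.Println(p.name, "exit", rule) }

func main() {
	listeners := []listener{printer{"A"}, printer{"B"}}
	for _, l := range listeners { // forward walk on rule entry
		l.enter("expr")
	}
	for i := len(listeners) - 1; i >= 0; i-- { // reverse walk on rule exit
		listeners[i].exit("expr")
	}
}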
func (p *BaseParser) GetCurrentToken() Token { return p.input.LT(1) @@ -446,7 +448,7 @@ func (p *BaseParser) addContextToParseTree() { } } -func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) { p.SetState(state) p.ctx = localctx p.ctx.SetStart(p.input.LT(1)) @@ -474,7 +476,7 @@ func (p *BaseParser) ExitRule() { func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { localctx.SetAltNumber(altNum) - // if we have Newlocalctx, make sure we replace existing ctx + // if we have a new localctx, make sure we replace existing ctx // that is previous child of parse tree if p.BuildParseTrees && p.ctx != localctx { if p.ctx.GetParent() != nil { @@ -498,7 +500,7 @@ func (p *BaseParser) GetPrecedence() int { return p.precedenceStack[len(p.precedenceStack)-1] } -func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) { p.SetState(state) p.precedenceStack.Push(precedence) p.ctx = localctx @@ -512,7 +514,7 @@ func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleI // // Like {@link //EnterRule} but for recursive rules. -func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) { previous := p.ctx previous.SetParent(localctx) previous.SetInvokingState(state) @@ -530,7 +532,7 @@ func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, } func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { - p.precedenceStack.Pop() + _, _ = p.precedenceStack.Pop() p.ctx.SetStop(p.input.LT(-1)) retCtx := p.ctx // save current ctx (return value) // unroll so ctx is as it was before call to recursive method @@ -561,29 +563,22 @@ func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { return nil } -func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { +func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool { return precedence >= p.precedenceStack[len(p.precedenceStack)-1] } +//goland:noinspection GoUnusedParameter func (p *BaseParser) inContext(context ParserRuleContext) bool { // TODO: useful in parser? return false } -// -// Checks whether or not {@code symbol} can follow the current state in the -// ATN. The behavior of p.method is equivalent to the following, but is +// IsExpectedToken checks whether symbol can follow the current state in the +// {ATN}. The behavior of p.method is equivalent to the following, but is // implemented such that the complete context-sensitive follow set does not // need to be explicitly constructed. // -//
-// return getExpectedTokens().contains(symbol)
-// 
-// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - +// return getExpectedTokens().contains(symbol) func (p *BaseParser) IsExpectedToken(symbol int) bool { atn := p.Interpreter.atn ctx := p.ctx @@ -611,11 +606,9 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool { return false } -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, +// GetExpectedTokens and returns the set of input symbols which could follow the current parser +// state and context, as given by [GetState] and [GetContext], // respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) func (p *BaseParser) GetExpectedTokens() *IntervalSet { return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) } @@ -626,7 +619,7 @@ func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { return atn.NextTokens(s, nil) } -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// +// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found. func (p *BaseParser) GetRuleIndex(ruleName string) int { var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] if ok { @@ -636,13 +629,10 @@ func (p *BaseParser) GetRuleIndex(ruleName string) int { return -1 } -// Return List<String> of the rule names in your parser instance +// GetRuleInvocationStack returns a list of the rule names in your parser instance // leading up to a call to the current rule. You could override if // you want more details such as the file/line info of where // in the ATN a rule is invoked. -// -// this very useful for error messages. - func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { if c == nil { c = p.ctx @@ -668,16 +658,16 @@ func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { return stack } -// For debugging and other purposes.// +// GetDFAStrings returns a list of all DFA states used for debugging purposes func (p *BaseParser) GetDFAStrings() string { return fmt.Sprint(p.Interpreter.decisionToDFA) } -// For debugging and other purposes.// +// DumpDFA prints the whole of the DFA for debugging func (p *BaseParser) DumpDFA() { seenOne := false for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.states.Len() > 0 { + if dfa.Len() > 0 { if seenOne { fmt.Println() } @@ -692,8 +682,10 @@ func (p *BaseParser) GetSourceName() string { return p.GrammarFileName } -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. p.is for quick and dirty debugging. +// SetTrace installs a trace listener for the parse. +// +// During a parse it is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. This is for quick and dirty debugging. 
func (p *BaseParser) SetTrace(trace *TraceListener) { if trace == nil { p.RemoveParseListener(p.tracer) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go similarity index 64% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go rename to vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go index 8bcc46a0d9..724fa17a19 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go +++ b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go @@ -10,31 +10,49 @@ import ( "strings" ) -var ( - ParserATNSimulatorDebug = false - ParserATNSimulatorTraceATNSim = false - ParserATNSimulatorDFADebug = false - ParserATNSimulatorRetryDebug = false - TurnOffLRLoopEntryBranchOpt = false -) +// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over +// a standard JStore so that we can use Lazy instantiation of the JStore, mostly +// to avoid polluting the stats module with a ton of JStore instances with nothing in them. +type ClosureBusy struct { + bMap *JStore[*ATNConfig, Comparator[*ATNConfig]] + desc string +} + +// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules +func NewClosureBusy(desc string) *ClosureBusy { + return &ClosureBusy{ + desc: desc, + } +} + +func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) { + if c.bMap == nil { + c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc) + } + return c.bMap.Put(config) +} type ParserATNSimulator struct { - *BaseATNSimulator + BaseATNSimulator parser Parser predictionMode int input TokenStream startIndex int dfa *DFA - mergeCache *DoubleDict + mergeCache *JPCMap outerContext ParserRuleContext } +//goland:noinspection GoUnusedExportedFunction func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { - p := new(ParserATNSimulator) - - p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + p := &ParserATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } p.parser = parser p.decisionToDFA = decisionToDFA @@ -46,12 +64,12 @@ func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, shared p.outerContext = nil p.dfa = nil // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't Synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid - // the merge if we ever see a and b again. Note that (b,a)&rarrc should - // also be examined during cache lookup. + // Don't keep around as it wastes huge amounts of memory. [JPCMap] + // isn't Synchronized, but we're ok since two threads shouldn't reuse same + // parser/atn-simulator object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid + // the merge if we ever see a and b again. Note that (b,a) -> c should + // also be examined during cache lookup. 
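ClosureBusy above only allocates its backing JStore on the first Put, so empty busy-sets stay cheap and the stats module is not flooded with unused stores. The same lazy-initialization idea in a self-contained sketch; busySet and its string key are stand-ins, not the vendored types:

package main

import "fmt"

type busySet struct {
	m map[string]struct{} // allocated lazily on first Put
}

// Put reports whether key was already present, allocating the map on demand.
func (b *busySet) Put(key string) bool {
	if b.m == nil {
		b.m = make(map[string]struct{})
	}
	if _, seen := b.m[key]; seen {
		return true
	}
	b.m[key] = struct{}{}
	return false
}

func main() {
	var b busySet // zero value is ready to use; nothing allocated yet
	fmt.Println(b.Put("cfg-1")) // false: first time
	fmt.Println(b.Put("cfg-1")) // true: already present
}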
// p.mergeCache = nil @@ -69,14 +87,14 @@ func (p *ParserATNSimulator) SetPredictionMode(v int) { func (p *ParserATNSimulator) reset() { } -func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + " exec LA(1)==" + p.getLookaheadName(input) + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) } - p.input = input p.startIndex = input.Index() p.outerContext = outerContext @@ -88,7 +106,15 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou defer func() { p.dfa = nil - p.mergeCache = nil // wack cache after each prediction + p.mergeCache = nil // whack cache after each prediction + // Do not attempt to run a GC now that we're done with the cache as makes the + // GC overhead terrible for badly formed grammars and has little effect on well formed + // grammars. + // I have made some extra effort to try and reduce memory pressure by reusing allocations when + // possible. However, it can only have a limited effect. The real solution is to encourage grammar + // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect + // what is happening at runtime, along with using the error listener to report ambiguities. + input.Seek(index) input.Release(m) }() @@ -113,7 +139,7 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou if outerContext == nil { outerContext = ParserRuleContextEmpty } - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + " exec LA(1)==" + p.getLookaheadName(input) + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) @@ -142,47 +168,52 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou p.atn.stateMu.Unlock() } - alt := p.execATN(dfa, s0, input, index, outerContext) - if ParserATNSimulatorDebug { + alt, re := p.execATN(dfa, s0, input, index, outerContext) + parser.SetError(re) + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) } return alt } -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. - +// execATN performs ATN simulation to compute a predicted alternative based +// upon the remaining input, but also updates the DFA cache to avoid +// having to traverse the ATN again for the same input sequence. +// // There are some key conditions we're looking for after computing a new // set of ATN configs (proposed DFA state): -// if the set is empty, there is no viable alternative for current symbol -// does the state uniquely predict an alternative? -// does the state have a conflict that would prevent us from -// putting it on the work list? - +// +// - If the set is empty, there is no viable alternative for current symbol +// - Does the state uniquely predict an alternative? 
+// - Does the state have a conflict that would prevent us from +// putting it on the work list? +// // We also have some key operations to do: -// add an edge from previous DFA state to potentially NewDFA state, D, -// upon current symbol but only if adding to work list, which means in all -// cases except no viable alternative (and possibly non-greedy decisions?) -// collecting predicates and adding semantic context to DFA accept states -// adding rule context to context-sensitive DFA accept states -// consuming an input symbol -// Reporting a conflict -// Reporting an ambiguity -// Reporting a context sensitivity -// Reporting insufficient predicates - -// cover these cases: // -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { +// - Add an edge from previous DFA state to potentially NewDFA state, D, +// - Upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) +// - Collecting predicates and adding semantic context to DFA accept states +// - adding rule context to context-sensitive DFA accept states +// - Consuming an input symbol +// - Reporting a conflict +// - Reporting an ambiguity +// - Reporting a context sensitivity +// - Reporting insufficient predicates +// +// Cover these cases: +// +// - dead end +// - single alt +// - single alt + predicates +// - conflict +// - conflict + predicates +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { + + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + ", DFA state " + s0.String() + ", LA(1)==" + p.getLookaheadName(input) + @@ -191,7 +222,7 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, previousD := s0 - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("s0 = " + s0.String()) } t := input.LA(1) @@ -214,17 +245,17 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, input.Seek(startIndex) alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) if alt != ATNInvalidAltNumber { - return alt + return alt, nil } - - panic(e) + p.parser.SetError(e) + return ATNInvalidAltNumber, e } if D.requiresFullContext && p.predictionMode != PredictionModeSLL { // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - conflictingAlts := D.configs.GetConflictingAlts() + conflictingAlts := D.configs.conflictingAlts if D.predicates != nil { - if ParserATNSimulatorDebug { - fmt.Println("DFA state has preds in DFA sim LL failover") + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("DFA state has preds in DFA sim LL fail-over") } conflictIndex := input.Index() if conflictIndex != startIndex { @@ -232,10 +263,10 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, } conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) if conflictingAlts.length() == 1 { - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("Full LL avoided") } - return 
conflictingAlts.minValue() + return conflictingAlts.minValue(), nil } if conflictIndex != startIndex { // restore the index so Reporting the fallback to full @@ -243,18 +274,18 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, input.Seek(conflictIndex) } } - if ParserATNSimulatorDFADebug { + if runtimeConfig.parserATNSimulatorDFADebug { fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) } fullCtx := true s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) - alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) - return alt + alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) + return alt, re } if D.isAcceptState { if D.predicates == nil { - return D.prediction + return D.prediction, nil } stopIndex := input.Index() input.Seek(startIndex) @@ -262,13 +293,13 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, switch alts.length() { case 0: - panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) + return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex) case 1: - return alts.minValue() + return alts.minValue(), nil default: // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported. p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue() + return alts.minValue(), nil } } previousD = D @@ -314,7 +345,8 @@ func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) // @return The computed target DFA state for the given input symbol // {@code t}. If {@code t} does not lead to a valid DFA state, p method // returns {@link //ERROR}. 
- +// +//goland:noinspection GoBoolExpressions func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { reach := p.computeReachSet(previousD.configs, t, false) @@ -322,12 +354,12 @@ func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) return ATNSimulatorError } - // create Newtarget state we'll add to DFA after it's complete + // create new target state we'll add to DFA after it's complete D := NewDFAState(-1, reach) predictedAlt := p.getUniqueAlt(reach) - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { altSubSets := PredictionModegetConflictingAltSubsets(reach) fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + ", previous=" + previousD.configs.String() + @@ -340,17 +372,17 @@ func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t if predictedAlt != ATNInvalidAltNumber { // NO CONFLICT, UNIQUELY PREDICTED ALT D.isAcceptState = true - D.configs.SetUniqueAlt(predictedAlt) + D.configs.uniqueAlt = predictedAlt D.setPrediction(predictedAlt) } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) + D.configs.conflictingAlts = p.getConflictingAlts(reach) D.requiresFullContext = true // in SLL-only mode, we will stop at p state and return the minimum alt D.isAcceptState = true - D.setPrediction(D.configs.GetConflictingAlts().minValue()) + D.setPrediction(D.configs.conflictingAlts.minValue()) } - if D.isAcceptState && D.configs.HasSemanticContext() { + if D.isAcceptState && D.configs.hasSemanticContext { p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) if D.predicates != nil { D.setPrediction(ATNInvalidAltNumber) @@ -381,15 +413,17 @@ func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState } // comes back with reach.uniqueAlt set to a valid alt -func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("execATNWithFullContext " + s0.String()) } fullCtx := true foundExactAmbig := false - var reach ATNConfigSet + var reach *ATNConfigSet previous := s0 input.Seek(startIndex) t := input.LA(1) @@ -407,25 +441,23 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT // ATN states in SLL implies LL will also get nowhere. // If conflict in states that dip out, choose min since we // will get error no matter what. 
- e := p.noViableAlt(input, outerContext, previous, startIndex) input.Seek(startIndex) alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) if alt != ATNInvalidAltNumber { - return alt + return alt, nil } - - panic(e) + return alt, p.noViableAlt(input, outerContext, previous, startIndex) } altSubSets := PredictionModegetConflictingAltSubsets(reach) - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) } - reach.SetUniqueAlt(p.getUniqueAlt(reach)) + reach.uniqueAlt = p.getUniqueAlt(reach) // unique prediction? - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - predictedAlt = reach.GetUniqueAlt() + if reach.uniqueAlt != ATNInvalidAltNumber { + predictedAlt = reach.uniqueAlt break } if p.predictionMode != PredictionModeLLExactAmbigDetection { @@ -454,9 +486,9 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT // If the configuration set uniquely predicts an alternative, // without conflict, then we know that it's a full LL decision // not SLL. - if reach.GetUniqueAlt() != ATNInvalidAltNumber { + if reach.uniqueAlt != ATNInvalidAltNumber { p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) - return predictedAlt + return predictedAlt, nil } // We do not check predicates here because we have checked them // on-the-fly when doing full context prediction. @@ -469,10 +501,10 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT // // For example, we might know that we have conflicting configurations. // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - + // conflict. It's possible to have non-conflicting alt subsets as in: + // // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - + // // from // // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), @@ -487,14 +519,15 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) - return predictedAlt + return predictedAlt, nil } -func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet { if p.mergeCache == nil { - p.mergeCache = NewDoubleDict() + p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()") } - intermediate := NewBaseATNConfigSet(fullCtx) + intermediate := NewATNConfigSet(fullCtx) // Configurations already in a rule stop state indicate reaching the end // of the decision rule (local context) or end of the start rule (full @@ -506,18 +539,18 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt // ensure that the alternative Matching the longest overall sequence is // chosen when multiple such configurations can Match the input. 
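The merge cache used by computeReachSet above memoizes prediction-context merges keyed by the pair (a, b), and the earlier comment notes that the symmetric key (b, a) should also be checked on lookup. A tiny pair-keyed cache sketch with stand-in types:

package main

import "fmt"

type pairKey struct{ a, b int }

type mergeCache struct {
	m map[pairKey]string
}

// get looks up (a, b) and, failing that, the symmetric key (b, a).
func (c *mergeCache) get(a, b int) (string, bool) {
	if v, ok := c.m[pairKey{a, b}]; ok {
		return v, true
	}
	v, ok := c.m[pairKey{b, a}]
	return v, ok
}

func (c *mergeCache) put(a, b int, merged string) {
	c.m[pairKey{a, b}] = merged
}

func main() {
	c := &mergeCache{m: map[pairKey]string{}}
	c.put(1, 2, "merged(1,2)")
	fmt.Println(c.get(2, 1)) // found via the symmetric lookup
}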
- var skippedStopStates []*BaseATNConfig + var skippedStopStates []*ATNConfig // First figure out where we can reach on input t - for _, c := range closure.GetItems() { - if ParserATNSimulatorDebug { + for _, c := range closure.configs { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) } if _, ok := c.GetState().(*RuleStopState); ok { if fullCtx || t == TokenEOF { - skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig)) - if ParserATNSimulatorDebug { + skippedStopStates = append(skippedStopStates, c) + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("added " + c.String() + " to SkippedStopStates") } } @@ -527,9 +560,9 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt for _, trans := range c.GetState().GetTransitions() { target := p.getReachableTarget(trans, t) if target != nil { - cfg := NewBaseATNConfig4(c, target) + cfg := NewATNConfig4(c, target) intermediate.Add(cfg, p.mergeCache) - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("added " + cfg.String() + " to intermediate") } } @@ -537,7 +570,7 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt } // Now figure out where the reach operation can take us... - var reach ATNConfigSet + var reach *ATNConfigSet // This block optimizes the reach operation for intermediate sets which // trivially indicate a termination state for the overall @@ -565,8 +598,8 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt // operation on the intermediate set to compute its initial value. // if reach == nil { - reach = NewBaseATNConfigSet(fullCtx) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) + reach = NewATNConfigSet(fullCtx) + closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy") treatEOFAsEpsilon := t == TokenEOF amount := len(intermediate.configs) for k := 0; k < amount; k++ { @@ -588,10 +621,10 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt // // This is handled before the configurations in SkippedStopStates, // because any configurations potentially added from that list are - // already guaranteed to meet p condition whether or not it's + // already guaranteed to meet this condition whether it's // required. // - reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) + reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate)) } // If SkippedStopStates!=nil, then it contains at least one // configuration. For full-context reach operations, these @@ -607,41 +640,40 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt } } - if ParserATNSimulatorTraceATNSim { + if runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) } - if len(reach.GetItems()) == 0 { + if len(reach.configs) == 0 { return nil } return reach } -// Return a configuration set containing only the configurations from -// {@code configs} which are in a {@link RuleStopState}. If all -// configurations in {@code configs} are already in a rule stop state, p -// method simply returns {@code configs}. +// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from +// configs which are in a [RuleStopState]. 
+// If all
+// configurations in configs are already in a rule stop state, this
+// method simply returns configs.
//
-//
-// When {@code lookToEndOfRule} is true, p method uses
-// {@link ATN//NextTokens} for each configuration in {@code configs} which is
 // not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.

+// from the configuration via epsilon-only transitions. // -// @param configs the configuration set to update -// @param lookToEndOfRule when true, p method checks for rule stop states +// When lookToEndOfRule is true, this method checks for rule stop states // reachable by epsilon-only transitions from each configuration in -// {@code configs}. +// configs. // -// @return {@code configs} if all configurations in {@code configs} are in a -// rule stop state, otherwise return a Newconfiguration set containing only -// the configurations from {@code configs} which are in a rule stop state -func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { +// The func returns configs if all configurations in configs are in a +// rule stop state, otherwise it returns a new configuration set containing only +// the configurations from configs which are in a rule stop state +func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet { if PredictionModeallConfigsInRuleStopStates(configs) { return configs } - result := NewBaseATNConfigSet(configs.FullContext()) - for _, config := range configs.GetItems() { + result := NewATNConfigSet(configs.fullCtx) + for _, config := range configs.configs { if _, ok := config.GetState().(*RuleStopState); ok { result.Add(config, p.mergeCache) continue @@ -650,91 +682,81 @@ func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfi NextTokens := p.atn.NextTokens(config.GetState(), nil) if NextTokens.contains(TokenEpsilon) { endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) + result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache) } } } return result } -func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet { // always at least the implicit call to start rule initialContext := predictionContextFromRuleContext(p.atn, ctx) - configs := NewBaseATNConfigSet(fullCtx) - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { + configs := NewATNConfigSet(fullCtx) + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("computeStartState from ATN state " + a.String() + " initialContext=" + initialContext.String()) } for i := 0; i < len(a.GetTransitions()); i++ { target := a.GetTransitions()[i].getTarget() - c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) + c := NewATNConfig6(target, i+1, initialContext) + closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy") p.closure(c, configs, closureBusy, true, fullCtx, false) } return configs } -// This method transforms the start state computed by -// {@link //computeStartState} to the special start state used by a -// precedence DFA for a particular precedence value. The transformation +// applyPrecedenceFilter transforms the start state computed by +// [computeStartState] to the special start state used by a +// precedence [DFA] for a particular precedence value. The transformation // process applies the following changes to the start state's configuration // set. // -//
-// 1. Evaluate the precedence predicates for each configuration using
-//    {@link SemanticContext//evalPrecedence}.
-// 2. Remove all configurations which predict an alternative greater than
-//    1, for which another configuration that predicts alternative 1 is in the
-//    same ATN state with the same prediction context. This transformation is
-//    valid for the following reasons:
-//
-//    • The closure block cannot contain any epsilon transitions which bypass
-//      the body of the closure, so all states reachable via alternative 1 are
-//      part of the precedence alternatives of the transformed left-recursive
-//      rule.
-//    • The "primary" portion of a left recursive rule cannot contain an
-//      epsilon transition, so the only way an alternative other than 1 can exist
-//      in a state that is also reachable via alternative 1 is by nesting calls
-//      to the left-recursive rule, with the outer calls not being at the
-//      preferred precedence level.
+// 1. Evaluate the precedence predicates for each configuration using +// [SemanticContext].evalPrecedence. +// 2. Remove all configurations which predict an alternative greater than +// 1, for which another configuration that predicts alternative 1 is in the +// same ATN state with the same prediction context. +// +// Transformation 2 is valid for the following reasons: +// +// - The closure block cannot contain any epsilon transitions which bypass +// the body of the closure, so all states reachable via alternative 1 are +// part of the precedence alternatives of the transformed left-recursive +// rule. +// - The "primary" portion of a left recursive rule cannot contain an +// epsilon transition, so the only way an alternative other than 1 can exist +// in a state that is also reachable via alternative 1 is by nesting calls +// to the left-recursive rule, with the outer calls not being at the +// preferred precedence level. +// +// The prediction context must be considered by this filter to address +// situations like the following: +// +// grammar TA +// prog: statement* EOF +// statement: letterA | statement letterA 'b' +// letterA: 'a' // -//
-// The prediction context must be considered by p filter to address
-// situations like the following.
-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-//
-// If the above grammar, the ATN state immediately before the token -// reference {@code 'a'} in {@code letterA} is reachable from the left edge +// In the above grammar, the [ATN] state immediately before the token +// reference 'a' in letterA is reachable from the left edge // of both the primary and closure blocks of the left-recursive rule -// {@code statement}. The prediction context associated with each of these +// statement. The prediction context associated with each of these // configurations distinguishes between them, and prevents the alternative -// which stepped out to {@code prog} (and then back in to {@code statement} +// which stepped out to prog, and then back in to statement // from being eliminated by the filter. -//
// -// @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. -// @return The transformed configuration set representing the start state -// for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). -func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { +// The func returns the transformed configuration set representing the start state +// for a precedence [DFA] at a particular precedence level (determined by +// calling [Parser].getPrecedence). +func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet { - statesFromAlt1 := make(map[int]PredictionContext) - configSet := NewBaseATNConfigSet(configs.FullContext()) + statesFromAlt1 := make(map[int]*PredictionContext) + configSet := NewATNConfigSet(configs.fullCtx) - for _, config := range configs.GetItems() { + for _, config := range configs.configs { // handle alt 1 first if config.GetAlt() != 1 { continue @@ -746,12 +768,12 @@ func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConf } statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() if updatedContext != config.GetSemanticContext() { - configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) + configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache) } else { configSet.Add(config, p.mergeCache) } } - for _, config := range configs.GetItems() { + for _, config := range configs.configs { if config.GetAlt() == 1 { // already handled @@ -780,10 +802,11 @@ func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATN return nil } -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext { altToPred := make([]SemanticContext, nalts+1) - for _, c := range configs.GetItems() { + for _, c := range configs.configs { if ambigAlts.contains(c.GetAlt()) { altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) } @@ -797,11 +820,11 @@ func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATN nPredAlts++ } } - // nonambig alts are nil in altToPred + // unambiguous alts are nil in altToPred if nPredAlts == 0 { altToPred = nil } - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) } return altToPred @@ -812,7 +835,7 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre containsPredicate := false for i := 1; i < len(altToPred); i++ { pred := altToPred[i] - // unpredicated is indicated by SemanticContextNONE + // un-predicated is indicated by SemanticContextNONE if ambigAlts != nil && ambigAlts.contains(i) { pairs = append(pairs, NewPredPrediction(pred, i)) } @@ -826,51 +849,42 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre return pairs } -// This method is used to improve the localization of error messages by -// choosing an alternative rather than panicing a -// {@link NoViableAltException} in particular prediction scenarios where the -// {@link //ERROR} state was reached during ATN simulation. 
+// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by +// choosing an alternative rather than panic a NoViableAltException in particular prediction scenarios where the +// Error state was reached during [ATN] simulation. // -//
-// The default implementation of p method uses the following -// algorithm to identify an ATN configuration which successfully parsed the +// The default implementation of this method uses the following +// algorithm to identify an [ATN] configuration which successfully parsed the // decision entry rule. Choosing such an alternative ensures that the -// {@link ParserRuleContext} returned by the calling rule will be complete +// [ParserRuleContext] returned by the calling rule will be complete // and valid, and the syntax error will be Reported later at a more -// localized location.
+// localized location. // -//
-// • If a syntactically valid path or paths reach the end of the decision rule and
-// they are semantically valid if predicated, return the min associated alt.
-// • Else, if a semantically invalid but syntactically valid path exist
-// or paths exist, return the minimum associated alt.
-// • Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
+// - If a syntactically valid path or paths reach the end of the decision rule, and +// they are semantically valid if predicated, return the min associated alt. +// - Else, if a semantically invalid but syntactically valid path exist +// or paths exist, return the minimum associated alt. +// - Otherwise, return [ATNInvalidAltNumber]. // -//
// In some scenarios, the algorithm described above could predict an -// alternative which will result in a {@link FailedPredicateException} in -// the parser. Specifically, p could occur if the only configuration +// alternative which will result in a [FailedPredicateException] in +// the parser. Specifically, this could occur if the only configuration // capable of successfully parsing to the end of the decision rule is -// blocked by a semantic predicate. By choosing p alternative within -// {@link //AdaptivePredict} instead of panicing a -// {@link NoViableAltException}, the resulting -// {@link FailedPredicateException} in the parser will identify the specific +// blocked by a semantic predicate. By choosing this alternative within +// [AdaptivePredict] instead of panic a [NoViableAltException], the resulting +// [FailedPredicateException] in the parser will identify the specific // predicate which is preventing the parser from successfully parsing the // decision rule, which helps developers identify and correct logic errors // in semantic predicates. -//
// -// @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached -// @param outerContext The is the \gamma_0 initial parser context from the paper +// pass in the configs holding ATN configurations which were valid immediately before +// the ERROR state was reached, outerContext as the initial parser context from the paper // or the parser stack at the instant before prediction commences. // -// @return The value to return from {@link //AdaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //AdaptivePredict} should Report an error instead. -func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { +// The func returns the value to return from [AdaptivePredict], or +// [ATNInvalidAltNumber] if a suitable alternative was not +// identified and [AdaptivePredict] should report an error instead. +func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int { cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) semValidConfigs := cfgs[0] semInvalidConfigs := cfgs[1] @@ -879,7 +893,7 @@ func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntry return alt } // Is there a syntactically valid path with a failed pred? - if len(semInvalidConfigs.GetItems()) > 0 { + if len(semInvalidConfigs.configs) > 0 { alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) if alt != ATNInvalidAltNumber { // syntactically viable path exists return alt @@ -888,10 +902,10 @@ func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntry return ATNInvalidAltNumber } -func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { +func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int { alts := NewIntervalSet() - for _, c := range configs.GetItems() { + for _, c := range configs.configs { _, ok := c.GetState().(*RuleStopState) if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { @@ -915,14 +929,14 @@ func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConf // prediction, which is where predicates need to evaluate. 
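Editor's note: the hunks above split the surviving configurations by semantic validity and then fall back to the minimum alternative that reached the end of the decision entry rule. The standalone sketch below is illustrative only and is not part of the vendored diff; the cfg type, minFinishedAlt helper, and invalidAlt constant are invented stand-ins for the runtime's *ATNConfig, *ATNConfigSet, and ATNInvalidAltNumber.

package main

import "fmt"

// cfg is a stand-in for *ATNConfig: an alternative, the result of its
// semantic predicate, and whether it reached the end of the decision rule.
type cfg struct {
	alt      int
	predOK   bool
	finished bool
}

const invalidAlt = 0 // stand-in for ATNInvalidAltNumber

// minFinishedAlt returns the smallest alternative among configs that
// finished the decision entry rule, or invalidAlt if none did.
func minFinishedAlt(cs []cfg) int {
	alt := invalidAlt
	for _, c := range cs {
		if c.finished && (alt == invalidAlt || c.alt < alt) {
			alt = c.alt
		}
	}
	return alt
}

func main() {
	configs := []cfg{
		{alt: 2, predOK: true, finished: false},
		{alt: 3, predOK: false, finished: true},
	}

	// Split according to semantic validity, as the runtime does.
	var valid, invalid []cfg
	for _, c := range configs {
		if c.predOK {
			valid = append(valid, c)
		} else {
			invalid = append(invalid, c)
		}
	}

	// Prefer a semantically valid alternative; otherwise fall back to a
	// syntactically viable but semantically invalid one so the failure is
	// reported later at a more precise location.
	if alt := minFinishedAlt(valid); alt != invalidAlt {
		fmt.Println("predict", alt)
	} else if alt := minFinishedAlt(invalid); alt != invalidAlt {
		fmt.Println("predict (semantically invalid)", alt) // prints: predict (semantically invalid) 3
	} else {
		fmt.Println("no viable alternative")
	}
}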
type ATNConfigSetPair struct { - item0, item1 ATNConfigSet + item0, item1 *ATNConfigSet } -func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { - succeeded := NewBaseATNConfigSet(configs.FullContext()) - failed := NewBaseATNConfigSet(configs.FullContext()) +func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet { + succeeded := NewATNConfigSet(configs.fullCtx) + failed := NewATNConfigSet(configs.fullCtx) - for _, c := range configs.GetItems() { + for _, c := range configs.configs { if c.GetSemanticContext() != SemanticContextNone { predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) if predicateEvaluationResult { @@ -934,15 +948,16 @@ func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigS succeeded.Add(c, nil) } } - return []ATNConfigSet{succeeded, failed} + return []*ATNConfigSet{succeeded, failed} } -// Look through a list of predicate/alt pairs, returning alts for the +// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the +// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an +// un-predicated runtimeConfig which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. // -// pairs that win. A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. +//goland:noinspection GoBoolExpressions func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { predictions := NewBitSet() for i := 0; i < len(predPredictions); i++ { @@ -956,11 +971,11 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti } predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) } if predicateEvaluationResult { - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) } predictions.add(pair.alt) @@ -972,19 +987,82 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti return predictions } -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) { +func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { initialDepth := 0 p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, fullCtx, initialDepth, treatEOFAsEpsilon) } -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - if ParserATNSimulatorTraceATNSim { +func (p *ParserATNSimulator) 
closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("closure(" + config.String() + ")") - //fmt.Println("configs(" + configs.String() + ")") - if config.GetReachesIntoOuterContext() > 50 { - panic("problem") + } + + var stack []*ATNConfig + visited := make(map[*ATNConfig]bool) + + stack = append(stack, config) + + for len(stack) > 0 { + currConfig := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + if _, ok := visited[currConfig]; ok { + continue + } + visited[currConfig] = true + + if _, ok := currConfig.GetState().(*RuleStopState); ok { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if !currConfig.GetContext().isEmpty() { + for i := 0; i < currConfig.GetContext().length(); i++ { + if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[currConfig.GetContext().getReturnState(i)] + newContext := currConfig.GetContext().GetParent(i) // "pop" return state + + c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. 
+ c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext()) + + stack = append(stack, c) + } + continue + } else if fullCtx { + // reached end of start rule + configs.Add(currConfig, p.mergeCache) + continue + } else { + // else if we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + } } + + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("closure(" + config.String() + ")") } if _, ok := config.GetState().(*RuleStopState); ok { @@ -994,11 +1072,12 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs for i := 0; i < config.GetContext().length(); i++ { if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { if fullCtx { - configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) + nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) continue } else { // we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) } p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) @@ -1008,7 +1087,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs returnState := p.atn.states[config.GetContext().getReturnState(i)] newContext := config.GetContext().GetParent(i) // "pop" return state - c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) // While we have context to pop back from, we may have // gotten that context AFTER having falling off a rule. // Make sure we track that we are now out of context. 
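Editor's note: the rewritten closureCheckingStopState above replaces the old recursive walk with an explicit stack and a visited map, which keeps stack depth bounded on deeply nested or cyclic configurations. A minimal, self-contained sketch of that traversal shape follows; it is not runtime code, and the node type and reachable function are invented for illustration.

package main

import "fmt"

// node is a toy stand-in for an ATN configuration with epsilon successors.
type node struct {
	id   int
	next []*node
}

// reachable visits every node reachable from start using an explicit stack
// and a visited map, mirroring the loop in the iterative closure above.
func reachable(start *node) []int {
	var order []int
	visited := make(map[*node]bool)
	stack := []*node{start}

	for len(stack) > 0 {
		// Pop the most recently pushed node (LIFO).
		curr := stack[len(stack)-1]
		stack = stack[:len(stack)-1]

		if visited[curr] {
			continue // each node is expanded at most once
		}
		visited[curr] = true
		order = append(order, curr.id)

		// Push successors instead of recursing into them.
		stack = append(stack, curr.next...)
	}
	return order
}

func main() {
	c := &node{id: 3}
	b := &node{id: 2, next: []*node{c}}
	a := &node{id: 1, next: []*node{b, c}}
	c.next = []*node{a} // a cycle is handled by the visited map, not by recursion depth

	fmt.Println(reachable(a)) // [1 3 2]
}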
@@ -1022,7 +1101,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs return } else { // else if we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) } } @@ -1030,8 +1109,10 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) } -// Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { +// Do the actual work of walking epsilon edges +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { state := config.GetState() // optimization if !state.GetEpsilonOnlyTransitions() { @@ -1048,7 +1129,7 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, _, ok := t.(*ActionTransition) continueCollecting := collectPredicates && !ok c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) - if ci, ok := c.(*BaseATNConfig); ok && ci != nil { + if c != nil { newDepth := depth if _, ok := config.GetState().(*RuleStopState); ok { @@ -1056,7 +1137,7 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, // We can't get here if incoming config was rule stop and we had context // track how far we dip into outer context. Might // come in handy and we avoid evaluating context dependent - // preds if p is > 0. + // preds if this is > 0. if p.dfa != nil && p.dfa.getPrecedenceDfa() { if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { @@ -1072,9 +1153,9 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, continue } - configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method + configs.dipsIntoOuterContext = true // TODO: can remove? 
only care when we add to set per middle of this method newDepth-- - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("dips into outer ctx: " + c.String()) } } else { @@ -1098,8 +1179,9 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, } } -func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool { - if TurnOffLRLoopEntryBranchOpt { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool { + if !runtimeConfig.lRLoopEntryBranchOpt { return false } @@ -1196,7 +1278,7 @@ func (p *ParserATNSimulator) getRuleName(index int) string { return sb.String() } -func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { +func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig { switch t.getSerializationType() { case TransitionRULE: @@ -1208,13 +1290,13 @@ func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, co case TransitionACTION: return p.actionTransition(config, t.(*ActionTransition)) case TransitionEPSILON: - return NewBaseATNConfig4(config, t.getTarget()) + return NewATNConfig4(config, t.getTarget()) case TransitionATOM, TransitionRANGE, TransitionSET: // EOF transitions act like epsilon transitions after the first EOF // transition is traversed if treatEOFAsEpsilon { if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) + return NewATNConfig4(config, t.getTarget()) } } return nil @@ -1223,60 +1305,63 @@ func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, co } } -func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) } - return NewBaseATNConfig4(config, t.getTarget()) + return NewATNConfig4(config, t.getTarget()) } -func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, - pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig, + pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") if p.parser != nil { fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) } } - var c *BaseATNConfig + var c *ATNConfig if collectPredicates && inContext { if fullCtx { // In full context mode, we can evaluate predicates on-the-fly // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates + // the runtimeConfig sets. It also obviates the need to test predicates // later during conflict resolution. 
currentPosition := p.input.Index() p.input.Seek(p.startIndex) predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) p.input.Seek(currentPosition) if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + c = NewATNConfig4(config, pt.getTarget()) // no pred context } } else { newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) } } else { - c = NewBaseATNConfig4(config, pt.getTarget()) + c = NewATNConfig4(config, pt.getTarget()) } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("runtimeConfig from pred transition=" + c.String()) } return c } -func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) if p.parser != nil { fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) } } - var c *BaseATNConfig + var c *ATNConfig if collectPredicates && (!pt.isCtxDependent || inContext) { if fullCtx { // In full context mode, we can evaluate predicates on-the-fly @@ -1288,78 +1373,92 @@ func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTrans predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) p.input.Seek(currentPosition) if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + c = NewATNConfig4(config, pt.getTarget()) // no pred context } } else { newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) } } else { - c = NewBaseATNConfig4(config, pt.getTarget()) + c = NewATNConfig4(config, pt.getTarget()) } - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("config from pred transition=" + c.String()) } return c } -func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) } returnState := t.followState newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) - return NewBaseATNConfig1(config, t.getTarget(), newContext) + return NewATNConfig1(config, t.getTarget(), newContext) } -func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { +func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet { altsets := PredictionModegetConflictingAltSubsets(configs) return PredictionModeGetAlts(altsets) } -// Sam pointed out a problem with the previous 
definition, v3, of +// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of // ambiguous states. If we have another state associated with conflicting // alternatives, we should keep going. For example, the following grammar // -// s : (ID | ID ID?) '' +// s : (ID | ID ID?) ; +// +// When the [ATN] simulation reaches the state before ;, it has a [DFA] +// state that looks like: +// +// [12|1|[], 6|2|[], 12|2|[]]. +// +// Naturally +// +// 12|1|[] and 12|2|[] +// +// conflict, but we cannot stop processing this node +// because alternative to has another way to continue, via +// +// [6|2|[]]. // -// When the ATN simulation reaches the state before '', it has a DFA -// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally -// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node -// because alternative to has another way to continue, via [6|2|[]]. // The key is that we have a single state that has config's only associated // with a single alternative, 2, and crucially the state transitions // among the configurations are all non-epsilon transitions. That means // we don't consider any conflicts that include alternative 2. So, we // ignore the conflict between alts 1 and 2. We ignore a set of // conflicting alts when there is an intersection with an alternative -// associated with a single alt state in the state&rarrconfig-list map. +// associated with a single alt state in the state config-list map. // // It's also the case that we might have two conflicting configurations but -// also a 3rd nonconflicting configuration for a different alternative: -// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: +// also a 3rd non-conflicting configuration for a different alternative: +// +// [1|1|[], 1|2|[], 8|3|[]]. +// +// This can come about from grammar: // -// a : A | A | A B +// a : A | A | A B // // After Matching input A, we reach the stop state for rule A, state 1. // State 8 is the state right before B. Clearly alternatives 1 and 2 // conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not -// stop working on p state. In the previous example, we're concerned +// However, alternative 3 will be able to continue, so we do not +// stop working on this state. +// +// In the previous example, we're concerned // with states associated with the conflicting alternatives. Here alt // 3 is not associated with the conflicting configs, but since we can continue // looking for input reasonably, I don't declare the state done. We // ignore a set of conflicting alts when we have an alternative // that we still need to pursue. -// - -func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { +func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet { var conflictingAlts *BitSet - if configs.GetUniqueAlt() != ATNInvalidAltNumber { + if configs.uniqueAlt != ATNInvalidAltNumber { conflictingAlts = NewBitSet() - conflictingAlts.add(configs.GetUniqueAlt()) + conflictingAlts.add(configs.uniqueAlt) } else { - conflictingAlts = configs.GetConflictingAlts() + conflictingAlts = configs.conflictingAlts } return conflictingAlts } @@ -1384,11 +1483,10 @@ func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { return p.GetTokenName(input.LA(1)) } -// Used for debugging in AdaptivePredict around execATN but I cut -// -// it out for clarity now that alg. works well. 
We can leave p -// "dead" code for a bit. -func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { +// Used for debugging in [AdaptivePredict] around [execATN], but I cut +// it out for clarity now that alg. works well. We can leave this +// "dead" code for a bit. +func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) { panic("Not implemented") @@ -1418,13 +1516,13 @@ func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { // } } -func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { +func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException { return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) } -func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { +func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int { alt := ATNInvalidAltNumber - for _, c := range configs.GetItems() { + for _, c := range configs.configs { if alt == ATNInvalidAltNumber { alt = c.GetAlt() // found first alt } else if c.GetAlt() != alt { @@ -1452,8 +1550,10 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { // @return If {@code to} is {@code nil}, p method returns {@code nil} // otherwise p method returns the result of calling {@link //addDFAState} // on {@code to} +// +//goland:noinspection GoBoolExpressions func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) } if to == nil { @@ -1472,7 +1572,7 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA from.setIthEdge(t+1, to) // connect p.atn.edgeMu.Unlock() - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { var names []string if p.parser != nil { names = p.parser.GetLiteralNames() @@ -1483,48 +1583,49 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA return to } -// Add state {@code D} to the DFA if it is not already present, and return -// the actual instance stored in the DFA. If a state equivalent to {@code D} -// is already in the DFA, the existing state is returned. Otherwise p -// method returns {@code D} after adding it to the DFA. +// addDFAState adds state D to the [DFA] if it is not already present, and returns +// the actual instance stored in the [DFA]. If a state equivalent to D +// is already in the [DFA], the existing state is returned. Otherwise, this +// method returns D after adding it to the [DFA]. // -//
-// If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and
-// does not change the DFA.
+// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and +// does not change the DFA. // -// @param dfa The dfa -// @param D The DFA state to add -// @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the -// state was not already present. +//goland:noinspection GoBoolExpressions func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { if d == ATNSimulatorError { return d } - existing, present := dfa.states.Get(d) + + existing, present := dfa.Get(d) if present { - if ParserATNSimulatorTraceATNSim { + if runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Print("addDFAState " + d.String() + " exists") } return existing } - // The state was not present, so update it with configs + // The state will be added if not already there or we will be given back the existing state struct + // if it is present. // - d.stateNumber = dfa.states.Len() - if !d.configs.ReadOnly() { - d.configs.OptimizeConfigs(p.BaseATNSimulator) - d.configs.SetReadOnly(true) + d.stateNumber = dfa.Len() + if !d.configs.readOnly { + d.configs.OptimizeConfigs(&p.BaseATNSimulator) + d.configs.readOnly = true + d.configs.configLookup = nil } - dfa.states.Put(d) - if ParserATNSimulatorTraceATNSim { + dfa.Put(d) + + if runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("addDFAState new " + d.String()) } return d } -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) @@ -1534,8 +1635,9 @@ func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAl } } -func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) @@ -1545,10 +1647,15 @@ func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, } } -// If context sensitive parsing, we know it's ambiguity not conflict// -func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { +// ReportAmbiguity reports and ambiguity in the parse, which shows that the parser will explore a different route. 
+// +// If context-sensitive parsing, we know it's an ambiguity not a conflict or error, but we can report it to the developer +// so that they can see that this is happening and can take action if they want to. +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int, + exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go similarity index 77% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go rename to vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go index 1c8cee7479..c249bc1385 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go +++ b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go @@ -31,7 +31,9 @@ type ParserRuleContext interface { } type BaseParserRuleContext struct { - *BaseRuleContext + parentCtx RuleContext + invokingState int + RuleIndex int start, stop Token exception RecognitionException @@ -40,8 +42,22 @@ type BaseParserRuleContext struct { func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { prc := new(BaseParserRuleContext) + InitBaseParserRuleContext(prc, parent, invokingStateNumber) + return prc +} + +func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) { + // What context invoked b rule? + prc.parentCtx = parent - prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) + // What state invoked the rule associated with b context? + // The "return address" is the followState of invokingState + // If parent is nil, b should be -1. + if parent == nil { + prc.invokingState = -1 + } else { + prc.invokingState = invokingStateNumber + } prc.RuleIndex = -1 // * If we are debugging or building a parse tree for a Visitor, @@ -56,8 +72,6 @@ func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) // The exception that forced prc rule to return. If the rule successfully // completed, prc is {@code nil}. prc.exception = nil - - return prc } func (prc *BaseParserRuleContext) SetException(e RecognitionException) { @@ -90,14 +104,15 @@ func (prc *BaseParserRuleContext) GetText() string { return s } -// Double dispatch methods for listeners -func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { +// EnterRule is called when any rule is entered. +func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) { } -func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { +// ExitRule is called when any rule is exited. 
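Editor's note: the parser_rule_context.go hunks above drop the embedded *BaseRuleContext in favour of inline fields plus an exported InitBaseParserRuleContext, so generated contexts can embed the base struct by value and initialise it without a second allocation. Below is a small sketch of that New/Init constructor split, using invented base and child types rather than the runtime's.

package main

import "fmt"

type base struct {
	parent        *base
	invokingState int
}

// InitBase fills in an already-allocated value, in the spirit of
// InitBaseParserRuleContext above.
func InitBase(b *base, parent *base, invokingState int) {
	b.parent = parent
	if parent == nil {
		// No parent means nobody invoked this context.
		b.invokingState = -1
	} else {
		b.invokingState = invokingState
	}
}

// NewBase keeps the old allocate-then-initialise behaviour.
func NewBase(parent *base, invokingState int) *base {
	b := new(base)
	InitBase(b, parent, invokingState)
	return b
}

// child embeds base by value, so constructing it needs only one allocation.
type child struct {
	base
	label string
}

func main() {
	root := NewBase(nil, 0)

	c := &child{label: "expr"}
	InitBase(&c.base, root, 7)

	fmt.Println(root.invokingState, c.invokingState) // -1 7
}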
+func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) { } -// * Does not set parent link other add methods do that/// +// * Does not set parent link other add methods do that func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { if prc.children == nil { prc.children = make([]Tree, 0) @@ -120,10 +135,9 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { return child } -// * Used by EnterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. If we have // label, we will need to remove -// generic ruleContext object. -// / +// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as +// we entered a rule. If we have a label, we will need to remove +// the generic ruleContext object. func (prc *BaseParserRuleContext) RemoveLastChild() { if prc.children != nil && len(prc.children) > 0 { prc.children = prc.children[0 : len(prc.children)-1] @@ -293,7 +307,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int { return len(prc.children) } -func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { +func (prc *BaseParserRuleContext) GetSourceInterval() Interval { if prc.start == nil || prc.stop == nil { return TreeInvalidInterval } @@ -340,6 +354,50 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s return s } +func (prc *BaseParserRuleContext) SetParent(v Tree) { + if v == nil { + prc.parentCtx = nil + } else { + prc.parentCtx = v.(RuleContext) + } +} + +func (prc *BaseParserRuleContext) GetInvokingState() int { + return prc.invokingState +} + +func (prc *BaseParserRuleContext) SetInvokingState(t int) { + prc.invokingState = t +} + +func (prc *BaseParserRuleContext) GetRuleIndex() int { + return prc.RuleIndex +} + +func (prc *BaseParserRuleContext) GetAltNumber() int { + return ATNInvalidAltNumber +} + +func (prc *BaseParserRuleContext) SetAltNumber(_ int) {} + +// IsEmpty returns true if the context of b is empty. +// +// A context is empty if there is no invoking state, meaning nobody calls +// current context. +func (prc *BaseParserRuleContext) IsEmpty() bool { + return prc.invokingState == -1 +} + +// GetParent returns the combined text of all child nodes. This method only considers +// tokens which have been added to the parse tree. +// +// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of this +// method. +func (prc *BaseParserRuleContext) GetParent() Tree { + return prc.parentCtx +} + var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1) type InterpreterRuleContext interface { @@ -350,6 +408,7 @@ type BaseInterpreterRuleContext struct { *BaseParserRuleContext } +//goland:noinspection GoUnusedExportedFunction func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { prc := new(BaseInterpreterRuleContext) diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go new file mode 100644 index 0000000000..a1d5186b8f --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go @@ -0,0 +1,727 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "strconv" +) + +var _emptyPredictionContextHash int + +func init() { + _emptyPredictionContextHash = murmurInit(1) + _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) +} + +func calculateEmptyHash() int { + return _emptyPredictionContextHash +} + +const ( + // BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $ + // doesn't mean wildcard: + // + // $ + x = [$,x] + // + // Here, + // + // $ = EmptyReturnState + BasePredictionContextEmptyReturnState = 0x7FFFFFFF +) + +// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here +// +//goland:noinspection GoUnusedGlobalVariable +var ( + BasePredictionContextglobalNodeCount = 1 + BasePredictionContextid = BasePredictionContextglobalNodeCount +) + +const ( + PredictionContextEmpty = iota + PredictionContextSingleton + PredictionContextArray +) + +// PredictionContext is a go idiomatic implementation of PredictionContext that does not rty to +// emulate inheritance from Java, and can be used without an interface definition. An interface +// is not required because no user code will ever need to implement this interface. +type PredictionContext struct { + cachedHash int + pcType int + parentCtx *PredictionContext + returnState int + parents []*PredictionContext + returnStates []int +} + +func NewEmptyPredictionContext() *PredictionContext { + nep := &PredictionContext{} + nep.cachedHash = calculateEmptyHash() + nep.pcType = PredictionContextEmpty + nep.returnState = BasePredictionContextEmptyReturnState + return nep +} + +func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext { + pc := &PredictionContext{} + pc.pcType = PredictionContextSingleton + pc.returnState = returnState + pc.parentCtx = parent + if parent != nil { + pc.cachedHash = calculateHash(parent, returnState) + } else { + pc.cachedHash = calculateEmptyHash() + } + return pc +} + +func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. 
+ hash := murmurInit(1) + for _, parent := range parents { + hash = murmurUpdate(hash, parent.Hash()) + } + for _, returnState := range returnStates { + hash = murmurUpdate(hash, returnState) + } + hash = murmurFinish(hash, len(parents)<<1) + + nec := &PredictionContext{} + nec.cachedHash = hash + nec.pcType = PredictionContextArray + nec.parents = parents + nec.returnStates = returnStates + return nec +} + +func (p *PredictionContext) Hash() int { + return p.cachedHash +} + +func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool { + if p == other { + return true + } + switch p.pcType { + case PredictionContextEmpty: + otherP := other.(*PredictionContext) + return other == nil || otherP == nil || otherP.isEmpty() + case PredictionContextSingleton: + return p.SingletonEquals(other) + case PredictionContextArray: + return p.ArrayEquals(other) + } + return false +} + +func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool { + if o == nil { + return false + } + other := o.(*PredictionContext) + if other == nil || other.pcType != PredictionContextArray { + return false + } + if p.cachedHash != other.Hash() { + return false // can't be same if hash is different + } + + // Must compare the actual array elements and not just the array address + // + return intSlicesEqual(p.returnStates, other.returnStates) && + pcSliceEqual(p.parents, other.parents) +} + +func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool { + if other == nil { + return false + } + otherP := other.(*PredictionContext) + if otherP == nil || otherP.pcType != PredictionContextSingleton { + return false + } + + if p.cachedHash != otherP.Hash() { + return false // Can't be same if hash is different + } + + if p.returnState != otherP.getReturnState(0) { + return false + } + + // Both parents must be nil if one is + if p.parentCtx == nil { + return otherP.parentCtx == nil + } + + return p.parentCtx.Equals(otherP.parentCtx) +} + +func (p *PredictionContext) GetParent(i int) *PredictionContext { + switch p.pcType { + case PredictionContextEmpty: + return nil + case PredictionContextSingleton: + return p.parentCtx + case PredictionContextArray: + return p.parents[i] + } + return nil +} + +func (p *PredictionContext) getReturnState(i int) int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates[i] + default: + return p.returnState + } +} + +func (p *PredictionContext) GetReturnStates() []int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates + default: + return []int{p.returnState} + } +} + +func (p *PredictionContext) length() int { + switch p.pcType { + case PredictionContextArray: + return len(p.returnStates) + default: + return 1 + } +} + +func (p *PredictionContext) hasEmptyPath() bool { + switch p.pcType { + case PredictionContextSingleton: + return p.returnState == BasePredictionContextEmptyReturnState + } + return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState +} + +func (p *PredictionContext) String() string { + switch p.pcType { + case PredictionContextEmpty: + return "$" + case PredictionContextSingleton: + var up string + + if p.parentCtx == nil { + up = "" + } else { + up = p.parentCtx.String() + } + + if len(up) == 0 { + if p.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(p.returnState) + } + + return strconv.Itoa(p.returnState) + " " + up + case PredictionContextArray: + if p.isEmpty() { + return "[]" + } + + s := "[" + 
for i := 0; i < len(p.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if p.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(p.returnStates[i]) + if !p.parents[i].isEmpty() { + s = s + " " + p.parents[i].String() + } else { + s = s + "nil" + } + } + return s + "]" + + default: + return "unknown" + } +} + +func (p *PredictionContext) isEmpty() bool { + switch p.pcType { + case PredictionContextEmpty: + return true + case PredictionContextArray: + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return p.returnStates[0] == BasePredictionContextEmptyReturnState + default: + return false + } +} + +func (p *PredictionContext) Type() int { + return p.pcType +} + +func calculateHash(parent *PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.Hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. (if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + + // Share same graph if both same + // + if a == b || a.Equals(b) { + return a + } + + if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton { + return mergeSingletons(a, b, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as wildcard + if rootIsWildcard { + if a.isEmpty() { + return a + } + if b.isEmpty() { + return b + } + } + + // Convert either Singleton or Empty to arrays, so that we can merge them + // + ara := convertToArray(a) + arb := convertToArray(b) + return mergeArrays(ara, arb, rootIsWildcard, mergeCache) +} + +func convertToArray(pc *PredictionContext) *PredictionContext { + switch pc.Type() { + case PredictionContextEmpty: + return NewArrayPredictionContext([]*PredictionContext{}, []int{}) + case PredictionContextSingleton: + return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)}) + default: + // Already an array + } + return pc +} + +// mergeSingletons merges two Singleton [PredictionContext] instances. +// +// Stack tops equal, parents merge is same return left graph. +//

+//
+// Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both element in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+//

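Editor's note: the three singleton-merge cases documented above (equal return states, equal parents with different return states, and fully distinct singletons) can be seen in a stripped-down form below. This is an illustrative sketch only; sctx and mergeSingle are invented stand-ins for *PredictionContext and mergeSingletons.

package main

import "fmt"

// sctx is a toy singleton context: one parent and one return state.
type sctx struct {
	parent      *sctx
	returnState int
}

// mergeSingle reduces two singletons to either one return state (equal stack
// tops) or a sorted pair, and reports whether the parent could be shared.
func mergeSingle(a, b sctx) (returnStates []int, sharedParent bool) {
	if a.returnState == b.returnState {
		// ax + ax = ax: collapse to a single entry (real code merges the parents too).
		return []int{a.returnState}, true
	}
	lo, hi := a.returnState, b.returnState
	if lo > hi {
		lo, hi = hi, lo // keep payloads sorted, as mergeSingletons does
	}
	// ax + bx = [a,b]x when the parents match; otherwise both parents are kept.
	return []int{lo, hi}, a.parent == b.parent
}

func main() {
	p := &sctx{returnState: 99}

	fmt.Println(mergeSingle(sctx{p, 5}, sctx{p, 5}))   // [5] true
	fmt.Println(mergeSingle(sctx{p, 7}, sctx{p, 3}))   // [3 7] true
	fmt.Println(mergeSingle(sctx{p, 7}, sctx{nil, 3})) // [3 7] false
}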
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// @param mergeCache +// / +func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + return previous + } + } + + rootMerge := mergeRoot(a, b, rootIsWildcard) + if rootMerge != nil { + if mergeCache != nil { + mergeCache.Put(a, b, rootMerge) + } + return rootMerge + } + if a.returnState == b.returnState { + parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) + // if parent is same as existing a or b parent or reduced to a parent, + // return it + if parent.Equals(a.parentCtx) { + return a // ax + bx = ax, if a=b + } + if parent.Equals(b.parentCtx) { + return b // ax + bx = bx, if a=b + } + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. dup a, a' points at merged array. + // New joined parent so create a new singleton pointing to it, a' + spc := SingletonBasePredictionContextCreate(parent, a.returnState) + if mergeCache != nil { + mergeCache.Put(a, b, spc) + } + return spc + } + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + var singleParent *PredictionContext + if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax + + // bx = + // [a,b]x + singleParent = a.parentCtx + } + if singleParent != nil { // parents are same + // sort payloads and use same parent + payloads := []int{a.returnState, b.returnState} + if a.returnState > b.returnState { + payloads[0] = b.returnState + payloads[1] = a.returnState + } + parents := []*PredictionContext{singleParent, singleParent} + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc + } + // parents differ and can't merge them. Just pack together + // into array can't merge. + // ax + by = [ax,by] + payloads := []int{a.returnState, b.returnState} + parents := []*PredictionContext{a.parentCtx, b.parentCtx} + if a.returnState > b.returnState { // sort by payload + payloads[0] = b.returnState + payloads[1] = a.returnState + parents = []*PredictionContext{b.parentCtx, a.parentCtx} + } + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc +} + +// Handle case where at least one of {@code a} or {@code b} is +// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used +// to represent {@link //EMPTY}. +// +//

+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
+// Special case of last merge if local context.
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+// Must keep all contexts {@link //EMPTY} in array is a special value (and
+// nil parent).
+//
+// +//

+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// / +func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext { + if rootIsWildcard { + if a.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // // + b =// + } + if b.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // a +// =// + } + } else { + if a.isEmpty() && b.isEmpty() { + return BasePredictionContextEMPTY // $ + $ = $ + } else if a.isEmpty() { // $ + x = [$,x] + payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{b.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present) + payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{a.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } + } + return nil +} + +// Merge two {@link ArrayBasePredictionContext} instances. +// +//

Different tops, different parents.
+//
+// Shared top, same parents.
+//
+// Shared top, different parents.
+//
+// Shared top, all shared parents.
+//
+// Equal tops, merge parents and reduce top to +// {@link SingletonBasePredictionContext}.

+// +//goland:noinspection GoBoolExpressions +func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + } + // merge sorted payloads a + b => M + i := 0 // walks a + j := 0 // walks b + k := 0 // walks target M array + + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) + mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates)) + // walk and merge to yield mergedParents, mergedReturnStates + for i < len(a.returnStates) && j < len(b.returnStates) { + aParent := a.parents[i] + bParent := b.parents[j] + if a.returnStates[i] == b.returnStates[j] { + // same payload (stack tops are equal), must yield merged singleton + payload := a.returnStates[i] + // $+$ = $ + bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil + axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax + // -> + // ax + if bothDollars || axAX { + mergedParents[k] = aParent // choose left + mergedReturnStates[k] = payload + } else { // ax+ay -> a'[x,y] + mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) + mergedParents[k] = mergedParent + mergedReturnStates[k] = payload + } + i++ // hop over left one as usual + j++ // but also Skip one in right side since we merge + } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M + mergedParents[k] = aParent + mergedReturnStates[k] = a.returnStates[i] + i++ + } else { // b > a, copy b[j] to M + mergedParents[k] = bParent + mergedReturnStates[k] = b.returnStates[j] + j++ + } + k++ + } + // copy over any payloads remaining in either array + if i < len(a.returnStates) { + for p := i; p < len(a.returnStates); p++ { + mergedParents[k] = a.parents[p] + mergedReturnStates[k] = a.returnStates[p] + k++ + } + } else { + for p := j; p < len(b.returnStates); p++ { + mergedParents[k] = b.parents[p] + mergedReturnStates[k] = b.returnStates[p] + k++ + } + } + // trim merged if we combined a few that had same stack tops + if k < len(mergedParents) { // write index < last position trim + if k == 1 { // for just one merged element, return singleton top + pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) + if mergeCache != nil { + mergeCache.Put(a, b, pc) + } + return pc + } + mergedParents = mergedParents[0:k] + mergedReturnStates = mergedReturnStates[0:k] + } + + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) + + // if we created same array as a or b, return that instead + // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation + if M.Equals(a) { + if mergeCache != nil { + mergeCache.Put(a, b, a) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") + } + return a + } + if M.Equals(b) { + if mergeCache != nil { + mergeCache.Put(a, b, b) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") + } + 
return b + } + combineCommonParents(&mergedParents) + + if mergeCache != nil { + mergeCache.Put(a, b, M) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String()) + } + return M +} + +// Make pass over all M parents and merge any Equals() ones. +// Note that we pass a pointer to the slice as we want to modify it in place. +// +//goland:noinspection GoUnusedFunction +func combineCommonParents(parents *[]*PredictionContext) { + uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext") + + for p := 0; p < len(*parents); p++ { + parent := (*parents)[p] + _, _ = uniqueParents.Put(parent) + } + for q := 0; q < len(*parents); q++ { + pc, _ := uniqueParents.Get((*parents)[q]) + (*parents)[q] = pc + } +} + +func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext { + if context.isEmpty() { + return context + } + existing, present := visited.Get(context) + if present { + return existing + } + + existing, present = contextCache.Get(context) + if present { + visited.Put(context, existing) + return existing + } + changed := false + parents := make([]*PredictionContext, context.length()) + for i := 0; i < len(parents); i++ { + parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) + if changed || !parent.Equals(context.GetParent(i)) { + if !changed { + parents = make([]*PredictionContext, context.length()) + for j := 0; j < context.length(); j++ { + parents[j] = context.GetParent(j) + } + changed = true + } + parents[i] = parent + } + } + if !changed { + contextCache.add(context) + visited.Put(context, context) + return context + } + var updated *PredictionContext + if len(parents) == 0 { + updated = BasePredictionContextEMPTY + } else if len(parents) == 1 { + updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) + } else { + updated = NewArrayPredictionContext(parents, context.GetReturnStates()) + } + contextCache.add(updated) + visited.Put(updated, updated) + visited.Put(context, updated) + + return updated +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go new file mode 100644 index 0000000000..25dfb11e8f --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go @@ -0,0 +1,48 @@ +package antlr + +var BasePredictionContextEMPTY = &PredictionContext{ + cachedHash: calculateEmptyHash(), + pcType: PredictionContextEmpty, + returnState: BasePredictionContextEmptyReturnState, +} + +// PredictionContextCache is Used to cache [PredictionContext] objects. It is used for the shared +// context cash associated with contexts in DFA states. This cache +// can be used for both lexers and parsers. +type PredictionContextCache struct { + cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]] +} + +func NewPredictionContextCache() *PredictionContextCache { + return &PredictionContextCache{ + cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"), + } +} + +// Add a context to the cache and return it. If the context already exists, +// return that one instead and do not add a new context to the cache. 
+// Protect shared cache from unsafe thread access. +func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext { + if ctx.isEmpty() { + return BasePredictionContextEMPTY + } + + // Put will return the existing entry if it is present (note this is done via Equals, not whether it is + // the same pointer), otherwise it will add the new entry and return that. + // + existing, present := p.cache.Get(ctx) + if present { + return existing + } + p.cache.Put(ctx, ctx) + return ctx +} + +func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) { + pc, exists := p.cache.Get(ctx) + return pc, exists +} + +func (p *PredictionContextCache) length() int { + return p.cache.Len() +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go new file mode 100644 index 0000000000..3f85a6a520 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go @@ -0,0 +1,536 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// This enumeration defines the prediction modes available in ANTLR 4 along with +// utility methods for analyzing configuration sets for conflicts and/or +// ambiguities. + +const ( + // PredictionModeSLL represents the SLL(*) prediction mode. + // This prediction mode ignores the current + // parser context when making predictions. This is the fastest prediction + // mode, and provides correct results for many grammars. This prediction + // mode is more powerful than the prediction mode provided by ANTLR 3, but + // may result in syntax errors for grammar and input combinations which are + // not SLL. + // + // When using this prediction mode, the parser will either return a correct + // parse tree (i.e. the same parse tree that would be returned with the + // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a + // syntax error is encountered when using the SLL prediction mode, + // it may be due to either an actual syntax error in the input or indicate + // that the particular combination of grammar and input requires the more + // powerful LL prediction abilities to complete successfully. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeSLL = 0 + + // PredictionModeLL represents the LL(*) prediction mode. + // This prediction mode allows the current parser + // context to be used for resolving SLL conflicts that occur during + // prediction. This is the fastest prediction mode that guarantees correct + // parse results for all combinations of grammars with syntactically correct + // inputs. + // + // When using this prediction mode, the parser will make correct decisions + // for all syntactically-correct grammar and input combinations. However, in + // cases where the grammar is truly ambiguous this prediction mode might not + // report a precise answer for exactly which alternatives are + // ambiguous. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeLL = 1 + + // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode + // with exact ambiguity detection. 
+ // + // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode, + // this prediction mode instructs the prediction algorithm to determine the + // complete and exact set of ambiguous alternatives for every ambiguous + // decision encountered while parsing. + // + // This prediction mode may be used for diagnosing ambiguities during + // grammar development. Due to the performance overhead of calculating sets + // of ambiguous alternatives, this prediction mode should be avoided when + // the exact results are not necessary. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeLLExactAmbigDetection = 2 +) + +// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition. +// +// This method computes the SLL prediction termination condition for both of +// the following cases: +// +// - The usual SLL+LL fallback upon SLL conflict +// - Pure SLL without LL fallback +// +// # Combined SLL+LL Parsing +// +// When LL-fallback is enabled upon SLL conflict, correct predictions are +// ensured regardless of how the termination condition is computed by this +// method. Due to the substantially higher cost of LL prediction, the +// prediction should only fall back to LL when the additional lookahead +// cannot lead to a unique SLL prediction. +// +// Assuming combined SLL+LL parsing, an SLL configuration set with only +// conflicting subsets should fall back to full LL, even if the +// configuration sets don't resolve to the same alternative, e.g. +// +// {1,2} and {3,4} +// +// If there is at least one non-conflicting +// configuration, SLL could continue with the hopes that more lookahead will +// resolve via one of those non-conflicting configurations. +// +// Here's the prediction termination rule them: SLL (for SLL+LL parsing) +// stops when it sees only conflicting configuration subsets. In contrast, +// full LL keeps going when there is uncertainty. +// +// # Heuristic +// +// As a heuristic, we stop prediction when we see any conflicting subset +// unless we see a state that only has one alternative associated with it. +// The single-alt-state thing lets prediction continue upon rules like +// (otherwise, it would admit defeat too soon): +// +// [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ; +// +// When the [ATN] simulation reaches the state before ';', it has a +// [DFA] state that looks like: +// +// [12|1|[], 6|2|[], 12|2|[]] +// +// Naturally +// +// 12|1|[] and 12|2|[] +// +// conflict, but we cannot stop processing this node because alternative to has another way to continue, +// via +// +// [6|2|[]] +// +// It also let's us continue for this rule: +// +// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ; +// +// After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state immediately before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue, and so we do not stop +// working on this state. In the previous example, we're concerned with +// states associated with the conflicting alternatives. Here alt 3 is not +// associated with the conflicting configs, but since we can continue +// looking for input reasonably, don't declare the state done. 
+// +// # Pure SLL Parsing +// +// To handle pure SLL parsing, all we have to do is make sure that we +// combine stack contexts for configurations that differ only by semantic +// predicate. From there, we can do the usual SLL termination heuristic. +// +// # Predicates in SLL+LL Parsing +// +// SLL decisions don't evaluate predicates until after they reach [DFA] stop +// states because they need to create the [DFA] cache that works in all +// semantic situations. In contrast, full LL evaluates predicates collected +// during start state computation, so it can ignore predicates thereafter. +// This means that SLL termination detection can totally ignore semantic +// predicates. +// +// Implementation-wise, [ATNConfigSet] combines stack contexts but not +// semantic predicate contexts, so we might see two configurations like the +// following: +// +// (s, 1, x, {}), (s, 1, x', {p}) +// +// Before testing these configurations against others, we have to merge +// x and x' (without modifying the existing configurations). +// For example, we test (x+x')==x” when looking for conflicts in +// the following configurations: +// +// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {}) +// +// If the configuration set has predicates (as indicated by +// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of +// the configurations to strip out all the predicates so that a standard +// [ATNConfigSet] will merge everything ignoring predicates. +func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool { + + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to Match additional input, so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.hasSemanticContext { + // dup configs, tossing out semantic predicates + dup := NewATNConfigSet(false) + for _, c := range configs.configs { + + // NewATNConfig({semanticContext:}, c) + c = NewATNConfig2(c, SemanticContextNone) + dup.Add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar predicates + } + // pure SLL or combined SLL+LL mode parsing + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) +} + +// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// The func returns true if any configuration in the supplied configs is in a [RuleStopState] +func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool { + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); ok { + return true + } + } + return false +} + +// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). 
+// +// the func returns true if all configurations in configs are in a +// [RuleStopState] +func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool { + + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); !ok { + return false + } + } + return true +} + +// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination. +// +// Can we stop looking ahead during [ATN] simulation or is there some +// uncertainty as to which alternative we will ultimately pick, after +// consuming more input? Even if there are partial conflicts, we might know +// that everything is going to resolve to the same minimum alternative. That +// means we can stop since no more lookahead will change that fact. On the +// other hand, there might be multiple conflicts that resolve to different +// minimums. That means we need more look ahead to decide which of those +// alternatives we should predict. +// +// The basic idea is to split the set of configurations 'C', into +// conflicting subsets (s, _, ctx, _) and singleton subsets with +// non-conflicting configurations. Two configurations conflict if they have +// identical [ATNConfig].state and [ATNConfig].context values +// but a different [ATNConfig].alt value, e.g. +// +// (s, i, ctx, _) +// +// and +// +// (s, j, ctx, _) ; for i != j +// +// Reduce these configuration subsets to the set of possible alternatives. +// You can compute the alternative subsets in one pass as follows: +// +// A_s,ctx = {i | (s, i, ctx, _)} +// +// for each configuration in C holding s and ctx fixed. +// +// Or in pseudo-code: +// +// for each configuration c in C: +// map[c] U = c.ATNConfig.alt alt // map hash/equals uses s and x, not alt and not pred +// +// The values in map are the set of +// +// A_s,ctx +// +// sets. +// +// If +// +// |A_s,ctx| = 1 +// +// then there is no conflict associated with s and ctx. +// +// Reduce the subsets to singletons by choosing a minimum of each subset. If +// the union of these alternative subsets is a singleton, then no amount of +// further lookahead will help us. We will always pick that alternative. If, +// however, there is more than one alternative, then we are uncertain which +// alternative to predict and must continue looking for resolution. We may +// or may not discover an ambiguity in the future, even if there are no +// conflicting subsets this round. +// +// The biggest sin is to terminate early because it means we've made a +// decision but were uncertain as to the eventual outcome. We haven't used +// enough lookahead. On the other hand, announcing a conflict too late is no +// big deal; you will still have the conflict. It's just inefficient. It +// might even look until the end of file. +// +// No special consideration for semantic predicates is required because +// predicates are evaluated on-the-fly for full LL prediction, ensuring that +// no configuration contains a semantic context during the termination +// check. +// +// # Conflicting Configs +// +// Two configurations: +// +// (s, i, x) and (s, j, x') +// +// conflict when i != j but x = x'. Because we merge all +// (s, i, _) configurations together, that means that there are at +// most n configurations associated with state s for +// n possible alternatives in the decision. The merged stacks +// complicate the comparison of configuration contexts x and x'. +// +// Sam checks to see if one is a subset of the other by calling +// merge and checking to see if the merged result is either x or x'. 
+// If the x associated with lowest alternative i +// is the superset, then i is the only possible prediction since the +// others resolve to min(i) as well. However, if x is +// associated with j > i then at least one stack configuration for +// j is not in conflict with alternative i. The algorithm +// should keep going, looking for more lookahead due to the uncertainty. +// +// For simplicity, I'm doing an equality check between x and +// x', which lets the algorithm continue to consume lookahead longer +// than necessary. The reason I like the equality is of course the +// simplicity but also because that is the test you need to detect the +// alternatives that are actually in conflict. +// +// # Continue/Stop Rule +// +// Continue if the union of resolved alternative sets from non-conflicting and +// conflicting alternative subsets has more than one alternative. We are +// uncertain about which alternative to predict. +// +// The complete set of alternatives, +// +// [i for (_, i, _)] +// +// tells us which alternatives are still in the running for the amount of input we've +// consumed at this point. The conflicting sets let us to strip away +// configurations that won't lead to more states because we resolve +// conflicts to the configuration with a minimum alternate for the +// conflicting set. +// +// Cases +// +// - no conflicts and more than 1 alternative in set => continue +// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set +// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s”, 1, z) yields non-conflicting set +// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1 +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets +// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2} +// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets +// {1} ∪ {2} = {1,2} => continue +// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets +// {1} ∪ {2} = {1,2} => continue +// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets +// {1} ∪ {3} = {1,3} => continue +// +// # Exact Ambiguity Detection +// +// If all states report the same conflicting set of alternatives, then we +// know we have the exact ambiguity set: +// +// |A_i| > 1 +// +// and +// +// A_i = A_j ; for all i, j +// +// In other words, we continue examining lookahead until all A_i +// have more than one alternative and all A_i are the same. If +// +// A={{1,2}, {1,3}} +// +// then regular LL prediction would terminate because the resolved set is {1}. +// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and +// two or one and three so we keep going. We can only stop prediction when +// we need exact ambiguity detection when the sets look like: +// +// A={{1,2}} +// +// or +// +// {{1,2},{1,2}}, etc... +func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { + return PredictionModegetSingleViableAlt(altsets) +} + +// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more +// than one alternative. 
+// +// The func returns true if every [BitSet] in altsets has +// [BitSet].cardinality cardinality > 1 +func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { + return !PredictionModehasNonConflictingAltSet(altsets) +} + +// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains +// exactly one alternative. +// +// The func returns true if altsets contains at least one [BitSet] with +// [BitSet].cardinality cardinality 1 +func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() == 1 { + return true + } + } + return false +} + +// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains +// more than one alternative. +// +// The func returns true if altsets contains a [BitSet] with +// [BitSet].cardinality cardinality > 1, otherwise false +func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() > 1 { + return true + } + } + return false +} + +// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent. +// +// The func returns true if every member of altsets is equal to the others. +func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { + var first *BitSet + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if first == nil { + first = alts + } else if alts != first { + return false + } + } + + return true +} + +// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in +// altsets. If no such alternative exists, this method returns +// [ATNInvalidAltNumber]. +// +// @param altsets a collection of alternative subsets +func PredictionModegetUniqueAlt(altsets []*BitSet) int { + all := PredictionModeGetAlts(altsets) + if all.length() == 1 { + return all.minValue() + } + + return ATNInvalidAltNumber +} + +// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of +// alternative subsets. This method returns the union of each [BitSet] +// in altsets, being the set of represented alternatives in altsets. +func PredictionModeGetAlts(altsets []*BitSet) *BitSet { + all := NewBitSet() + for _, alts := range altsets { + all.or(alts) + } + return all +} + +// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. +// +// for each configuration c in configs: +// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred +func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet { + configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()") + + for _, c := range configs.configs { + + alts, ok := configToAlts.Get(c) + if !ok { + alts = NewBitSet() + configToAlts.Put(c, alts) + } + alts.add(c.GetAlt()) + } + + return configToAlts.Values() +} + +// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. 
+// +// for each configuration c in configs: +// map[c.ATNConfig.state] U= c.ATNConfig.alt} +func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict { + m := NewAltDict() + + for _, c := range configs.configs { + alts := m.Get(c.GetState().String()) + if alts == nil { + alts = NewBitSet() + m.put(c.GetState().String(), alts) + } + alts.(*BitSet).add(c.GetAlt()) + } + return m +} + +func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool { + values := PredictionModeGetStateToAltMap(configs).values() + for i := 0; i < len(values); i++ { + if values[i].(*BitSet).length() == 1 { + return true + } + } + return false +} + +// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets +// if there is one. +// +// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet +func PredictionModegetSingleViableAlt(altsets []*BitSet) int { + result := ATNInvalidAltNumber + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + minAlt := alts.minValue() + if result == ATNInvalidAltNumber { + result = minAlt + } else if result != minAlt { // more than 1 viable alt + return ATNInvalidAltNumber + } + } + return result +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go similarity index 70% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go rename to vendor/github.com/antlr4-go/antlr/v4/recognizer.go index bfe542d091..dcb8548cd1 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go @@ -26,6 +26,9 @@ type Recognizer interface { RemoveErrorListeners() GetATN() *ATN GetErrorListenerDispatch() ErrorListener + HasError() bool + GetError() RecognitionException + SetError(RecognitionException) } type BaseRecognizer struct { @@ -36,6 +39,7 @@ type BaseRecognizer struct { LiteralNames []string SymbolicNames []string GrammarFileName string + SynErr RecognitionException } func NewBaseRecognizer() *BaseRecognizer { @@ -45,17 +49,32 @@ func NewBaseRecognizer() *BaseRecognizer { return rec } +//goland:noinspection GoUnusedGlobalVariable var tokenTypeMapCache = make(map[string]int) + +//goland:noinspection GoUnusedGlobalVariable var ruleIndexMapCache = make(map[string]int) func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.12.0" + runtimeVersion := "4.13.1" if runtimeVersion != toolVersion { fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) } } -func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { +func (b *BaseRecognizer) SetError(err RecognitionException) { + b.SynErr = err +} + +func (b *BaseRecognizer) HasError() bool { + return b.SynErr != nil +} + +func (b *BaseRecognizer) GetError() RecognitionException { + return b.SynErr +} + +func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) { panic("action not implemented on Recognizer!") } @@ -105,9 +124,11 @@ func (b *BaseRecognizer) SetState(v int) { // return result //} -// Get a map from rule names to rule indexes. +// GetRuleIndexMap Get a map from rule names to rule indexes. // -//

Used for XPath and tree pattern compilation.
+// Used for XPath and tree pattern compilation. +// +// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed. func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { panic("Method not defined!") @@ -124,7 +145,8 @@ func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { // return result } -func (b *BaseRecognizer) GetTokenType(tokenName string) int { +// GetTokenType get the token type based upon its name +func (b *BaseRecognizer) GetTokenType(_ string) int { panic("Method not defined!") // var ttype = b.GetTokenTypeMap()[tokenName] // if (ttype !=nil) { @@ -162,26 +184,27 @@ func (b *BaseRecognizer) GetTokenType(tokenName string) int { // } //} -// What is the error header, normally line/character position information?// +// GetErrorHeader returns the error header, normally line/character position information. +// +// Can be overridden in sub structs embedding BaseRecognizer. func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { line := e.GetOffendingToken().GetLine() column := e.GetOffendingToken().GetColumn() return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) } -// How should a token be displayed in an error message? The default +// GetTokenErrorDisplay shows how a token should be displayed in an error message. // -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. +// The default is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. // -// @deprecated This method is not called by the ANTLR 4 Runtime. Specific -// implementations of {@link ANTLRErrorStrategy} may provide a similar -// feature when necessary. For example, see -// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. +// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific +// implementations of [ANTLRErrorStrategy] may provide a similar +// feature when necessary. 
For example, see [DefaultErrorStrategy].GetTokenErrorDisplay() func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { if t == nil { return "" @@ -205,12 +228,14 @@ func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { return NewProxyErrorListener(b.listeners) } -// subclass needs to override these if there are sempreds or actions -// that the ATN interp needs to execute -func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool { +// Sempred embedding structs need to override this if there are sempreds or actions +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool { return true } -func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool { +// Precpred embedding structs need to override this if there are preceding predicates +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool { return true } diff --git a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go new file mode 100644 index 0000000000..f2ad04793e --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go @@ -0,0 +1,40 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// RuleContext is a record of a single rule invocation. It knows +// which context invoked it, if any. If there is no parent context, then +// naturally the invoking state is not valid. The parent link +// provides a chain upwards from the current rule invocation to the root +// of the invocation tree, forming a stack. +// +// We actually carry no information about the rule associated with this context (except +// when parsing). We keep only the state number of the invoking state from +// the [ATN] submachine that invoked this. Contrast this with the s +// pointer inside [ParserRuleContext] that tracks the current state +// being "executed" for the current rule. +// +// The parent contexts are useful for computing lookahead sets and +// getting error information. +// +// These objects are used during parsing and prediction. +// For the special case of parsers, we use the struct +// [ParserRuleContext], which embeds a RuleContext. +// +// @see ParserRuleContext +type RuleContext interface { + RuleNode + + GetInvokingState() int + SetInvokingState(int) + + GetRuleIndex() int + IsEmpty() bool + + GetAltNumber() int + SetAltNumber(altNumber int) + + String([]string, RuleContext) string +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go similarity index 92% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go rename to vendor/github.com/antlr4-go/antlr/v4/semantic_context.go index a702e99def..68cb9061eb 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go +++ b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go @@ -9,14 +9,13 @@ import ( "strconv" ) -// A tree structure used to record the semantic context in which -// an ATN configuration is valid. It's either a single predicate, -// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. +// SemanticContext is a tree structure used to record the semantic context in which // -//

I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of -// {@link SemanticContext} within the scope of this outer class.
+// an ATN configuration is valid. It's either a single predicate, +// a conjunction p1 && p2, or a sum of products p1 || p2. // - +// I have scoped the AND, OR, and Predicate subclasses of +// [SemanticContext] within the scope of this outer ``class'' type SemanticContext interface { Equals(other Collectable[SemanticContext]) bool Hash() int @@ -80,7 +79,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { var SemanticContextNone = NewPredicate(-1, -1, false) -func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { +func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext { return p } @@ -198,7 +197,7 @@ type AND struct { func NewAND(a, b SemanticContext) *AND { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands") if aa, ok := a.(*AND); ok { for _, o := range aa.opnds { operands.Put(o) @@ -230,9 +229,7 @@ func NewAND(a, b SemanticContext) *AND { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } + copy(opnds, vs) and := new(AND) and.opnds = opnds @@ -316,12 +313,12 @@ func (a *AND) Hash() int { return murmurFinish(h, len(a.opnds)) } -func (a *OR) Hash() int { - h := murmurInit(41) // Init with a value different from AND - for _, op := range a.opnds { +func (o *OR) Hash() int { + h := murmurInit(41) // Init with o value different from AND + for _, op := range o.opnds { h = murmurUpdate(h, op.Hash()) } - return murmurFinish(h, len(a.opnds)) + return murmurFinish(h, len(o.opnds)) } func (a *AND) String() string { @@ -349,7 +346,7 @@ type OR struct { func NewOR(a, b SemanticContext) *OR { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands") if aa, ok := a.(*OR); ok { for _, o := range aa.opnds { operands.Put(o) @@ -382,9 +379,7 @@ func NewOR(a, b SemanticContext) *OR { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } + copy(opnds, vs) o := new(OR) o.opnds = opnds diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go new file mode 100644 index 0000000000..8cb5f3ed6f --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/statistics.go @@ -0,0 +1,280 @@ +//go:build antlr.stats + +package antlr + +import ( + "fmt" + "log" + "os" + "path/filepath" + "sort" + "strconv" +) + +// This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default +// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag. +// + +// Tells various components to collect statistics - because it is only true when this file is included, it will +// allow the compiler to completely eliminate all the code that is only used when collecting statistics. +const collectStats = true + +// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run. +// It is exported so that it can be used by others to look for things that are not already looked for in the +// runtime statistics. 
+type goRunStats struct { + + // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created + // during a run. It is exported so that it can be used by others to look for things that are not already looked for + // within this package. + // + jStats []*JStatRec + jStatsLock RWMutex + topN int + topNByMax []*JStatRec + topNByUsed []*JStatRec + unusedCollections map[CollectionSource]int + counts map[CollectionSource]int +} + +const ( + collectionsFile = "collections" +) + +var ( + Statistics = &goRunStats{ + topN: 10, + } +) + +type statsOption func(*goRunStats) error + +// Configure allows the statistics system to be configured as the user wants and override the defaults +func (s *goRunStats) Configure(options ...statsOption) error { + for _, option := range options { + err := option(s) + if err != nil { + return err + } + } + return nil +} + +// WithTopN sets the number of things to list in the report when we are concerned with the top N things. +// +// For example, if you want to see the top 20 collections by size, you can do: +// +// antlr.Statistics.Configure(antlr.WithTopN(20)) +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + s.topN = topN + return nil + } +} + +// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user. +// +// The function gathers and analyzes a number of statistics about any particular run of +// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only +// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be +// especially useful in tracking down bugs or performance problems when an ANTLR user could +// supply the output from this package, but cannot supply the grammar file(s) they are using, even +// privately to the maintainers. +// +// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user +// must call this function their selves to analyze the statistics. This is because none of the infrastructure is +// extant unless the calling program is built with the antlr.stats tag like so: +// +// go build -tags antlr.stats . +// +// When a program is built with the antlr.stats tag, the Statistics object is created and available outside +// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the +// [Statistics.Report] function to report the statistics. +// +// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send to them to +// me [Jim Idle] directly at jimi@idle.ws +// +// [Jim Idle]: https:://github.com/jim-idle +func (s *goRunStats) Analyze() { + + // Look for anything that looks strange and record it in our local maps etc for the report to present it + // + s.CollectionAnomalies() + s.TopNCollections() +} + +// TopNCollections looks through all the statistical records and gathers the top ten collections by size. 
+func (s *goRunStats) TopNCollections() { + + // Let's sort the stat records by MaxSize + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].MaxSize > s.jStats[j].MaxSize + }) + + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByMax = append(s.topNByMax, s.jStats[i]) + } + + // Sort by the number of times used + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts + }) + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByUsed = append(s.topNByUsed, s.jStats[i]) + } +} + +// Report dumps a markdown formatted report of all the statistics collected during a run to the given dir output +// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be +// given a type name such as `anomalies` and a time stamp such as `2021-09-01T12:34:56` and a .md suffix. +func (s *goRunStats) Report(dir string, prefix string) error { + + isDir, err := isDirectory(dir) + switch { + case err != nil: + return err + case !isDir: + return fmt.Errorf("output directory `%s` is not a directory", dir) + } + s.reportCollections(dir, prefix) + + // Clean out any old data in case the user forgets + // + s.Reset() + return nil +} + +func (s *goRunStats) Reset() { + s.jStats = nil + s.topNByUsed = nil + s.topNByMax = nil +} + +func (s *goRunStats) reportCollections(dir, prefix string) { + cname := filepath.Join(dir, ".asciidoctor") + // If the file doesn't exist, create it, or append to the file + f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + _, _ = f.WriteString(`// .asciidoctorconfig +++++ + +++++`) + _ = f.Close() + + fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc") + // If the file doesn't exist, create it, or append to the file + f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + defer func(f *os.File) { + err := f.Close() + if err != nil { + log.Fatal(err) + } + }(f) + _, _ = f.WriteString("= Collections for " + prefix + "\n\n") + + _, _ = f.WriteString("== Summary\n") + + if s.unusedCollections != nil { + _, _ = f.WriteString("=== Unused Collections\n") + _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n") + _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n") + _, _ = f.WriteString(" consider removing it. 
If you are using a collection that is used, but not very often,\n") + _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n") + _, _ = f.WriteString(" actually needed.\n\n") + + _, _ = f.WriteString("\n.Unused collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + + for k, v := range s.unusedCollections { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + f.WriteString("|===\n\n") + } + + _, _ = f.WriteString("\n.Summary of Collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + for k, v := range s.counts { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n") + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n") + for _, c := range s.topNByMax { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n") + for _, c := range s.topNByUsed { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") +} + +// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when build runtimeConfig antlr.stats is enabled. +func (s *goRunStats) AddJStatRec(rec *JStatRec) { + s.jStatsLock.Lock() + defer s.jStatsLock.Unlock() + s.jStats = append(s.jStats, rec) +} + +// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found. 
+func (s *goRunStats) CollectionAnomalies() { + s.jStatsLock.RLock() + defer s.jStatsLock.RUnlock() + s.counts = make(map[CollectionSource]int, len(s.jStats)) + for _, c := range s.jStats { + + // Accumlate raw counts + // + s.counts[c.Source]++ + + // Look for allocated but unused collections and count them + if c.MaxSize == 0 && c.Puts == 0 { + if s.unusedCollections == nil { + s.unusedCollections = make(map[CollectionSource]int) + } + s.unusedCollections[c.Source]++ + } + if c.MaxSize > 6000 { + fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar") + } + } + +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go new file mode 100644 index 0000000000..4d9eb94e5f --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go @@ -0,0 +1,23 @@ +package antlr + +// A JStatRec is a record of a particular use of a [JStore], [JMap] or JPCMap] collection. Typically, it will be +// used to look for unused collections that wre allocated anyway, problems with hash bucket clashes, and anomalies +// such as huge numbers of Gets with no entries found GetNoEnt. You can refer to the CollectionAnomalies() function +// for ideas on what can be gleaned from these statistics about collections. +type JStatRec struct { + Source CollectionSource + MaxSize int + CurSize int + Gets int + GetHits int + GetMisses int + GetHashConflicts int + GetNoEnt int + Puts int + PutHits int + PutMisses int + PutHashConflicts int + MaxSlotSize int + Description string + CreateStack []byte +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go b/vendor/github.com/antlr4-go/antlr/v4/token.go similarity index 74% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go rename to vendor/github.com/antlr4-go/antlr/v4/token.go index f73b06bc6a..f5bc34229d 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go +++ b/vendor/github.com/antlr4-go/antlr/v4/token.go @@ -35,6 +35,8 @@ type Token interface { GetTokenSource() TokenSource GetInputStream() CharStream + + String() string } type BaseToken struct { @@ -53,7 +55,7 @@ type BaseToken struct { const ( TokenInvalidType = 0 - // During lookahead operations, this "token" signifies we hit rule end ATN state + // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state // and did not follow it despite needing to. TokenEpsilon = -2 @@ -61,15 +63,16 @@ const ( TokenEOF = -1 - // All tokens go to the parser (unless Skip() is called in that rule) + // TokenDefaultChannel is the default channel upon which tokens are sent to the parser. + // + // All tokens go to the parser (unless [Skip] is called in the lexer rule) // on a particular "channel". The parser tunes to a particular channel // so that whitespace etc... can go to the parser on a "hidden" channel. - TokenDefaultChannel = 0 - // Anything on different channel than DEFAULT_CHANNEL is not parsed - // by parser. - + // TokenHiddenChannel defines the normal hidden channel - the parser wil not see tokens that are not on [TokenDefaultChannel]. + // + // Anything on a different channel than TokenDefaultChannel is not parsed by parser. 
TokenHiddenChannel = 1 ) @@ -101,6 +104,25 @@ func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { return b.source } +func (b *BaseToken) GetText() string { + if b.text != "" { + return b.text + } + input := b.GetInputStream() + if input == nil { + return "" + } + n := input.Size() + if b.GetStart() < n && b.GetStop() < n { + return input.GetTextFromInterval(NewInterval(b.GetStart(), b.GetStop())) + } + return "" +} + +func (b *BaseToken) SetText(text string) { + b.text = text +} + func (b *BaseToken) GetTokenIndex() int { return b.tokenIndex } @@ -117,22 +139,45 @@ func (b *BaseToken) GetInputStream() CharStream { return b.source.charStream } +func (b *BaseToken) String() string { + txt := b.GetText() + if txt != "" { + txt = strings.Replace(txt, "\n", "\\n", -1) + txt = strings.Replace(txt, "\r", "\\r", -1) + txt = strings.Replace(txt, "\t", "\\t", -1) + } else { + txt = "" + } + + var ch string + if b.GetChannel() > 0 { + ch = ",channel=" + strconv.Itoa(b.GetChannel()) + } else { + ch = "" + } + + return "[@" + strconv.Itoa(b.GetTokenIndex()) + "," + strconv.Itoa(b.GetStart()) + ":" + strconv.Itoa(b.GetStop()) + "='" + + txt + "',<" + strconv.Itoa(b.GetTokenType()) + ">" + + ch + "," + strconv.Itoa(b.GetLine()) + ":" + strconv.Itoa(b.GetColumn()) + "]" +} + type CommonToken struct { - *BaseToken + BaseToken } func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { - t := new(CommonToken) - - t.BaseToken = new(BaseToken) + t := &CommonToken{ + BaseToken: BaseToken{ + source: source, + tokenType: tokenType, + channel: channel, + start: start, + stop: stop, + tokenIndex: -1, + }, + } - t.source = source - t.tokenType = tokenType - t.channel = channel - t.start = start - t.stop = stop - t.tokenIndex = -1 if t.source.tokenSource != nil { t.line = source.tokenSource.GetLine() t.column = source.tokenSource.GetCharPositionInLine() @@ -166,44 +211,3 @@ func (c *CommonToken) clone() *CommonToken { t.text = c.GetText() return t } - -func (c *CommonToken) GetText() string { - if c.text != "" { - return c.text - } - input := c.GetInputStream() - if input == nil { - return "" - } - n := input.Size() - if c.start < n && c.stop < n { - return input.GetTextFromInterval(NewInterval(c.start, c.stop)) - } - return "" -} - -func (c *CommonToken) SetText(text string) { - c.text = text -} - -func (c *CommonToken) String() string { - txt := c.GetText() - if txt != "" { - txt = strings.Replace(txt, "\n", "\\n", -1) - txt = strings.Replace(txt, "\r", "\\r", -1) - txt = strings.Replace(txt, "\t", "\\t", -1) - } else { - txt = "" - } - - var ch string - if c.channel > 0 { - ch = ",channel=" + strconv.Itoa(c.channel) - } else { - ch = "" - } - - return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + - txt + "',<" + strconv.Itoa(c.tokenType) + ">" + - ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go b/vendor/github.com/antlr4-go/antlr/v4/token_source.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go rename to vendor/github.com/antlr4-go/antlr/v4/token_source.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go similarity index 90% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go rename to 
vendor/github.com/antlr4-go/antlr/v4/token_stream.go index 1527d43f60..bf4ff6633e 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go +++ b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go @@ -8,13 +8,14 @@ type TokenStream interface { IntStream LT(k int) Token + Reset() Get(index int) Token GetTokenSource() TokenSource SetTokenSource(TokenSource) GetAllText() string - GetTextFromInterval(*Interval) string + GetTextFromInterval(Interval) string GetTextFromRuleContext(RuleContext) string GetTextFromTokens(Token, Token) string } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go similarity index 73% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go rename to vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go index b3e38af344..ccf59b465c 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go +++ b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go @@ -86,14 +86,15 @@ import ( // first example shows.

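Before the renames below, a hedged usage sketch of this rewriter may help orient the reader. The `rewriteExample` function and the specific edits are illustrative only; the calls themselves (NewCommonTokenStream, NewTokenStreamRewriter, InsertBeforeDefault, ReplaceDefault, GetTextDefault) match the API as renamed in this file, and the lexer is assumed to come from ANTLR-generated code elsewhere:

```go
package rewriting

import "github.com/antlr4-go/antlr/v4"

// rewriteExample sketches typical rewriter usage; pass any ANTLR-generated lexer.
// Edits are queued against the "default" program and only materialize when
// GetTextDefault renders the stream; the original token stream is untouched.
// It assumes the input yields at least three tokens, otherwise ReplaceDefault panics.
func rewriteExample(lexer antlr.Lexer) string {
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	tokens.Fill()

	rw := antlr.NewTokenStreamRewriter(tokens)
	rw.InsertBeforeDefault(0, "/* header */ ")
	rw.ReplaceDefault(2, 2, "y") // replace the third token's text
	return rw.GetTextDefault()
}
```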
const ( - Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 + DefaultProgramName = "default" + ProgramInitSize = 100 + MinTokenIndex = 0 ) // Define the rewrite operation hierarchy type RewriteOperation interface { + // Execute the rewrite operation by possibly adding to the buffer. // Return the index of the next token to operate on. Execute(buffer *bytes.Buffer) int @@ -112,19 +113,19 @@ type RewriteOperation interface { type BaseRewriteOperation struct { //Current index of rewrites list - instruction_index int + instructionIndex int //Token buffer index index int //Substitution text text string //Actual operation name - op_name string + opName string //Pointer to token steam tokens TokenStream } func (op *BaseRewriteOperation) GetInstructionIndex() int { - return op.instruction_index + return op.instructionIndex } func (op *BaseRewriteOperation) GetIndex() int { @@ -136,7 +137,7 @@ func (op *BaseRewriteOperation) GetText() string { } func (op *BaseRewriteOperation) GetOpName() string { - return op.op_name + return op.opName } func (op *BaseRewriteOperation) GetTokens() TokenStream { @@ -144,7 +145,7 @@ func (op *BaseRewriteOperation) GetTokens() TokenStream { } func (op *BaseRewriteOperation) SetInstructionIndex(val int) { - op.instruction_index = val + op.instructionIndex = val } func (op *BaseRewriteOperation) SetIndex(val int) { @@ -156,20 +157,20 @@ func (op *BaseRewriteOperation) SetText(val string) { } func (op *BaseRewriteOperation) SetOpName(val string) { - op.op_name = val + op.opName = val } func (op *BaseRewriteOperation) SetTokens(val TokenStream) { op.tokens = val } -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int { +func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int { return op.index } func (op *BaseRewriteOperation) String() string { return fmt.Sprintf("<%s@%d:\"%s\">", - op.op_name, + op.opName, op.tokens.Get(op.GetIndex()), op.text, ) @@ -182,10 +183,10 @@ type InsertBeforeOp struct { func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index, - text: text, - op_name: "InsertBeforeOp", - tokens: stream, + index: index, + text: text, + opName: "InsertBeforeOp", + tokens: stream, }} } @@ -201,20 +202,21 @@ func (op *InsertBeforeOp) String() string { return op.BaseRewriteOperation.String() } -// Distinguish between insert after/before to do the "insert afters" -// first and then the "insert befores" at same index. Implementation -// of "insert after" is "insert before index+1". - +// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions +// first and then the "insert before" instructions at same index. Implementation +// of "insert after" is "insert before index+1". type InsertAfterOp struct { BaseRewriteOperation } func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { - return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index + 1, - text: text, - tokens: stream, - }} + return &InsertAfterOp{ + BaseRewriteOperation: BaseRewriteOperation{ + index: index + 1, + text: text, + tokens: stream, + }, + } } func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { @@ -229,7 +231,7 @@ func (op *InsertAfterOp) String() string { return op.BaseRewriteOperation.String() } -// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp +// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp // instructions. 
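As the InsertAfterOp comment above states, an "insert after index i" is recorded as an "insert before index i+1". A small, non-ANTLR illustration of why that is enough when queued inserts are flushed during a single left-to-right pass over the tokens:

```go
package main

import (
	"fmt"
	"strings"
)

// applyInserts walks the token list once and emits any queued insert-before
// text ahead of each index; an "insert after i" was stored under i+1.
func applyInserts(tokens []string, insertBefore map[int]string) string {
	var b strings.Builder
	for i := 0; i <= len(tokens); i++ {
		if txt, ok := insertBefore[i]; ok {
			b.WriteString(txt)
		}
		if i < len(tokens) {
			b.WriteString(tokens[i])
		}
	}
	return b.String()
}

func main() {
	tokens := []string{"a", "b", "c"}

	ops := map[int]string{}
	ops[1] = "<" // InsertBefore(1, "<")
	// InsertAfter(1, ">") is recorded as InsertBefore(1+1, ">"):
	ops[2] = ">"

	fmt.Println(applyInserts(tokens, ops)) // a<b>c
}
```

The real operations also carry instruction indexes so that colliding edits can be combined or rejected, which is what the reduction pass further down handles.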
type ReplaceOp struct { BaseRewriteOperation @@ -239,10 +241,10 @@ type ReplaceOp struct { func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { return &ReplaceOp{ BaseRewriteOperation: BaseRewriteOperation{ - index: from, - text: text, - op_name: "ReplaceOp", - tokens: stream, + index: from, + text: text, + opName: "ReplaceOp", + tokens: stream, }, LastIndex: to, } @@ -270,17 +272,17 @@ type TokenStreamRewriter struct { // You may have multiple, named streams of rewrite operations. // I'm calling these things "programs." // Maps String (name) → rewrite (List) - programs map[string][]RewriteOperation - last_rewrite_token_indexes map[string]int + programs map[string][]RewriteOperation + lastRewriteTokenIndexes map[string]int } func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter { return &TokenStreamRewriter{ tokens: tokens, programs: map[string][]RewriteOperation{ - Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size), + DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize), }, - last_rewrite_token_indexes: map[string]int{}, + lastRewriteTokenIndexes: map[string]int{}, } } @@ -291,110 +293,110 @@ func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream { // Rollback the instruction stream for a program so that // the indicated instruction (via instructionIndex) is no // longer in the stream. UNTESTED! -func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) { - is, ok := tsr.programs[program_name] +func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) { + is, ok := tsr.programs[programName] if ok { - tsr.programs[program_name] = is[Min_Token_Index:instruction_index] + tsr.programs[programName] = is[MinTokenIndex:instructionIndex] } } -func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) { - tsr.Rollback(Default_Program_Name, instruction_index) +func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) { + tsr.Rollback(DefaultProgramName, instructionIndex) } -// Reset the program so that no instructions exist -func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) { - tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included +// DeleteProgram Reset the program so that no instructions exist +func (tsr *TokenStreamRewriter) DeleteProgram(programName string) { + tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included } func (tsr *TokenStreamRewriter) DeleteProgramDefault() { - tsr.DeleteProgram(Default_Program_Name) + tsr.DeleteProgram(DefaultProgramName) } -func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) { +func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) { // to insert after, just insert before next index (even if past end) var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) + rewrites := tsr.GetProgram(programName) op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) + tsr.AddToProgram(programName, op) } func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) { - tsr.InsertAfter(Default_Program_Name, index, text) + tsr.InsertAfter(DefaultProgramName, index, text) } -func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) { - tsr.InsertAfter(program_name, token.GetTokenIndex(), text) +func (tsr 
*TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) { + tsr.InsertAfter(programName, token.GetTokenIndex(), text) } -func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) { +func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) { var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) + rewrites := tsr.GetProgram(programName) op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) + tsr.AddToProgram(programName, op) } func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) { - tsr.InsertBefore(Default_Program_Name, index, text) + tsr.InsertBefore(DefaultProgramName, index, text) } -func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) { - tsr.InsertBefore(program_name, token.GetTokenIndex(), text) +func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) { + tsr.InsertBefore(programName, token.GetTokenIndex(), text) } -func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) { +func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) { if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", from, to, tsr.tokens.Size())) } var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) + rewrites := tsr.GetProgram(programName) op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) + tsr.AddToProgram(programName, op) } func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { - tsr.Replace(Default_Program_Name, from, to, text) + tsr.Replace(DefaultProgramName, from, to, text) } func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { tsr.ReplaceDefault(index, index, text) } -func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) { - tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) +func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) { + tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text) } func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { - tsr.ReplaceToken(Default_Program_Name, from, to, text) + tsr.ReplaceToken(DefaultProgramName, from, to, text) } func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { tsr.ReplaceTokenDefault(index, index, text) } -func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) { - tsr.Replace(program_name, from, to, "") +func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) { + tsr.Replace(programName, from, to, "") } func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { - tsr.Delete(Default_Program_Name, from, to) + tsr.Delete(DefaultProgramName, from, to) } func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { tsr.DeleteDefault(index, index) } -func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) { - tsr.ReplaceToken(program_name, from, to, "") +func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) { + tsr.ReplaceToken(programName, from, to, "") } func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { - tsr.DeleteToken(Default_Program_Name, from, to) + 
tsr.DeleteToken(DefaultProgramName, from, to) } -func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int { - i, ok := tsr.last_rewrite_token_indexes[program_name] +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int { + i, ok := tsr.lastRewriteTokenIndexes[programName] if !ok { return -1 } @@ -402,15 +404,15 @@ func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) in } func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { - return tsr.GetLastRewriteTokenIndex(Default_Program_Name) + return tsr.GetLastRewriteTokenIndex(DefaultProgramName) } -func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) { - tsr.last_rewrite_token_indexes[program_name] = i +func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) { + tsr.lastRewriteTokenIndexes[programName] = i } func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { - is := make([]RewriteOperation, 0, Program_Init_Size) + is := make([]RewriteOperation, 0, ProgramInitSize) tsr.programs[name] = is return is } @@ -429,24 +431,24 @@ func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { return is } -// Return the text from the original tokens altered per the +// GetTextDefault returns the text from the original tokens altered per the // instructions given to this rewriter. func (tsr *TokenStreamRewriter) GetTextDefault() string { return tsr.GetText( - Default_Program_Name, + DefaultProgramName, NewInterval(0, tsr.tokens.Size()-1)) } -// Return the text from the original tokens altered per the +// GetText returns the text from the original tokens altered per the // instructions given to this rewriter. -func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string { - rewrites := tsr.programs[program_name] +func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string { + rewrites := tsr.programs[programName] start := interval.Start stop := interval.Stop // ensure start/end are in range stop = min(stop, tsr.tokens.Size()-1) start = max(start, 0) - if rewrites == nil || len(rewrites) == 0 { + if len(rewrites) == 0 { return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute } buf := bytes.Buffer{} @@ -482,11 +484,13 @@ func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) return buf.String() } -// We need to combine operations and report invalid operations (like -// overlapping replaces that are not completed nested). Inserts to -// same index need to be combined etc... Here are the cases: +// reduceToSingleOperationPerIndex combines operations and report invalid operations (like +// overlapping replaces that are not completed nested). Inserts to +// same index need to be combined etc... 
+// +// Here are the cases: // -// I.i.u I.j.v leave alone, nonoverlapping +// I.i.u I.j.v leave alone, non-overlapping // I.i.u I.i.v combine: Iivu // // R.i-j.u R.x-y.v | i-j in x-y delete first R @@ -498,38 +502,38 @@ func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) // // I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before -// we're not deleting i) -// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping +// we're not deleting i) +// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping // R.x-y.v I.i.u | i in x-y ERROR // R.x-y.v I.x.u R.x-y.uv (combine, delete I) -// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping +// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping // // I.i.u = insert u before op @ index i // R.x-y.u = replace x-y indexed tokens with u // -// First we need to examine replaces. For any replace op: +// First we need to examine replaces. For any replace op: // -// 1. wipe out any insertions before op within that range. -// 2. Drop any replace op before that is contained completely within -// that range. -// 3. Throw exception upon boundary overlap with any previous replace. +// 1. wipe out any insertions before op within that range. +// 2. Drop any replace op before that is contained completely within +// that range. +// 3. Throw exception upon boundary overlap with any previous replace. // -// Then we can deal with inserts: +// Then we can deal with inserts: // -// 1. for any inserts to same index, combine even if not adjacent. -// 2. for any prior replace with same left boundary, combine this -// insert with replace and delete this replace. -// 3. throw exception if index in same range as previous replace +// 1. for any inserts to same index, combine even if not adjacent. +// 2. for any prior replace with same left boundary, combine this +// insert with replace and delete this 'replace'. +// 3. throw exception if index in same range as previous replace // -// Don't actually delete; make op null in list. Easier to walk list. -// Later we can throw as we add to index → op map. +// Don't actually delete; make op null in list. Easier to walk list. +// Later we can throw as we add to index → op map. // -// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the -// inserted stuff would be before the replace range. But, if you -// add tokens in front of a method body '{' and then delete the method -// body, I think the stuff before the '{' you added should disappear too. +// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the +// inserted stuff would be before the 'replace' range. But, if you +// add tokens in front of a method body '{' and then delete the method +// body, I think the stuff before the '{' you added should disappear too. // -// Return a map from token index to operation. +// The func returns a map from token index to operation. 
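A compact, non-ANTLR rendition of the two replace-versus-insert rules spelled out above: an insert sitting on a replace's left boundary is folded into the replacement text, while an insert strictly inside the replaced range is dropped. The `insert` and `replace` types here are toy stand-ins, not the RewriteOperation hierarchy:

```go
package main

import "fmt"

type insert struct {
	index int
	text  string
}

type replace struct {
	from, to int
	text     string
}

// fold applies the reduction rules from the comment: inserts at rep.from are
// prepended to the replacement text, inserts in (from, to] are discarded, and
// everything else is kept untouched.
func fold(ins []insert, rep replace) ([]insert, replace) {
	kept := ins[:0:0]
	for _, op := range ins {
		switch {
		case op.index == rep.from:
			rep.text = op.text + rep.text // I.i.u + R.i-j.v  =>  R.i-j.uv
		case op.index > rep.from && op.index <= rep.to:
			// dropped: the inserted text would land inside the replaced range
		default:
			kept = append(kept, op)
		}
	}
	return kept, rep
}

func main() {
	inserts := []insert{{2, "X"}, {3, "Y"}, {5, "Z"}}
	kept, rep := fold(inserts, replace{from: 2, to: 4, text: "body"})

	fmt.Println(kept) // [{5 Z}]     : only the insert outside 2..4 survives
	fmt.Println(rep)  // {2 4 Xbody} : the boundary insert merged into the replace
}
```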
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { // WALK REPLACES for i := 0; i < len(rewrites); i++ { @@ -547,7 +551,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit if iop.index == rop.index { // E.g., insert before 2, delete 2..2; update replace // text to include insert before, kill insert - rewrites[iop.instruction_index] = nil + rewrites[iop.instructionIndex] = nil if rop.text != "" { rop.text = iop.text + rop.text } else { @@ -555,7 +559,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit } } else if iop.index > rop.index && iop.index <= rop.LastIndex { // delete insert as it's a no-op. - rewrites[iop.instruction_index] = nil + rewrites[iop.instructionIndex] = nil } } } @@ -564,7 +568,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit if prevop, ok := rewrites[j].(*ReplaceOp); ok { if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { // delete replace as it's a no-op. - rewrites[prevop.instruction_index] = nil + rewrites[prevop.instructionIndex] = nil continue } // throw exception unless disjoint or identical @@ -572,10 +576,9 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit // Delete special case of replace (text==null): // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) if prevop.text == "" && rop.text == "" && !disjoint { - rewrites[prevop.instruction_index] = nil + rewrites[prevop.instructionIndex] = nil rop.index = min(prevop.index, rop.index) rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) - println("new rop" + rop.String()) //TODO: remove console write, taken from Java version } else if !disjoint { panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) } @@ -607,7 +610,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok { if prevIop.index == iop.GetIndex() { iop.SetText(iop.GetText() + prevIop.text) - rewrites[prevIop.instruction_index] = nil + rewrites[prevIop.instructionIndex] = nil } } } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go b/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go rename to vendor/github.com/antlr4-go/antlr/v4/trace_listener.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go b/vendor/github.com/antlr4-go/antlr/v4/transition.go similarity index 67% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go rename to vendor/github.com/antlr4-go/antlr/v4/transition.go index 36be4f7331..313b0fc127 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go +++ b/vendor/github.com/antlr4-go/antlr/v4/transition.go @@ -72,7 +72,7 @@ func (t *BaseTransition) getSerializationType() int { return t.serializationType } -func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *BaseTransition) Matches(_, _, _ int) bool { panic("Not implemented") } @@ -89,6 +89,7 @@ const ( TransitionPRECEDENCE = 10 ) +//goland:noinspection GoUnusedGlobalVariable var TransitionserializationNames = []string{ "INVALID", "EPSILON", @@ -127,19 +128,22 @@ var TransitionserializationNames = []string{ // TransitionPRECEDENCE //} +// AtomTransition // TODO: make all transitions sets? 
no, should remove set edges type AtomTransition struct { - *BaseTransition + BaseTransition } func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { - - t := new(AtomTransition) - t.BaseTransition = NewBaseTransition(target) - - t.label = intervalSet // The token type or character value or, signifies special intervalSet. + t := &AtomTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionATOM, + label: intervalSet, + isEpsilon: false, + }, + } t.intervalSet = t.makeLabel() - t.serializationType = TransitionATOM return t } @@ -150,7 +154,7 @@ func (t *AtomTransition) makeLabel() *IntervalSet { return s } -func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *AtomTransition) Matches(symbol, _, _ int) bool { return t.label == symbol } @@ -159,48 +163,45 @@ func (t *AtomTransition) String() string { } type RuleTransition struct { - *BaseTransition - + BaseTransition followState ATNState ruleIndex, precedence int } func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { - - t := new(RuleTransition) - t.BaseTransition = NewBaseTransition(ruleStart) - - t.ruleIndex = ruleIndex - t.precedence = precedence - t.followState = followState - t.serializationType = TransitionRULE - t.isEpsilon = true - - return t + return &RuleTransition{ + BaseTransition: BaseTransition{ + target: ruleStart, + isEpsilon: true, + serializationType: TransitionRULE, + }, + ruleIndex: ruleIndex, + precedence: precedence, + followState: followState, + } } -func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *RuleTransition) Matches(_, _, _ int) bool { return false } type EpsilonTransition struct { - *BaseTransition - + BaseTransition outermostPrecedenceReturn int } func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { - - t := new(EpsilonTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionEPSILON - t.isEpsilon = true - t.outermostPrecedenceReturn = outermostPrecedenceReturn - return t + return &EpsilonTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionEPSILON, + isEpsilon: true, + }, + outermostPrecedenceReturn: outermostPrecedenceReturn, + } } -func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *EpsilonTransition) Matches(_, _, _ int) bool { return false } @@ -209,19 +210,20 @@ func (t *EpsilonTransition) String() string { } type RangeTransition struct { - *BaseTransition - + BaseTransition start, stop int } func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { - - t := new(RangeTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionRANGE - t.start = start - t.stop = stop + t := &RangeTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionRANGE, + isEpsilon: false, + }, + start: start, + stop: stop, + } t.intervalSet = t.makeLabel() return t } @@ -232,7 +234,7 @@ func (t *RangeTransition) makeLabel() *IntervalSet { return s } -func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *RangeTransition) Matches(symbol, _, _ int) bool { return symbol >= t.start && symbol <= t.stop } @@ -252,40 +254,41 @@ type AbstractPredicateTransition interface { } type BaseAbstractPredicateTransition struct { - *BaseTransition + BaseTransition 
} func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { - - t := new(BaseAbstractPredicateTransition) - t.BaseTransition = NewBaseTransition(target) - - return t + return &BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + }, + } } func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} type PredicateTransition struct { - *BaseAbstractPredicateTransition - + BaseAbstractPredicateTransition isCtxDependent bool ruleIndex, predIndex int } func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { - - t := new(PredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPREDICATE - t.ruleIndex = ruleIndex - t.predIndex = predIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t + return &PredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPREDICATE, + isEpsilon: true, + }, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + predIndex: predIndex, + } } -func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *PredicateTransition) Matches(_, _, _ int) bool { return false } @@ -298,26 +301,25 @@ func (t *PredicateTransition) String() string { } type ActionTransition struct { - *BaseTransition - + BaseTransition isCtxDependent bool ruleIndex, actionIndex, predIndex int } func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { - - t := new(ActionTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionACTION - t.ruleIndex = ruleIndex - t.actionIndex = actionIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t + return &ActionTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionACTION, + isEpsilon: true, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + actionIndex: actionIndex, + } } -func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *ActionTransition) Matches(_, _, _ int) bool { return false } @@ -326,26 +328,27 @@ func (t *ActionTransition) String() string { } type SetTransition struct { - *BaseTransition + BaseTransition } func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { + t := &SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionSET, + }, + } - t := new(SetTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionSET if set != nil { t.intervalSet = set } else { t.intervalSet = NewIntervalSet() t.intervalSet.addOne(TokenInvalidType) } - return t } -func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *SetTransition) Matches(symbol, _, _ int) bool { return t.intervalSet.contains(symbol) } @@ -354,16 +357,24 @@ func (t *SetTransition) String() string { } type NotSetTransition struct { - *SetTransition + SetTransition } func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { - - t := new(NotSetTransition) - - t.SetTransition = NewSetTransition(target, set) - - t.serializationType = TransitionNOTSET + t := &NotSetTransition{ + SetTransition: SetTransition{ + 
BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionNOTSET, + }, + }, + } + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } return t } @@ -377,16 +388,16 @@ func (t *NotSetTransition) String() string { } type WildcardTransition struct { - *BaseTransition + BaseTransition } func NewWildcardTransition(target ATNState) *WildcardTransition { - - t := new(WildcardTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionWILDCARD - return t + return &WildcardTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionWILDCARD, + }, + } } func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { @@ -398,24 +409,24 @@ func (t *WildcardTransition) String() string { } type PrecedencePredicateTransition struct { - *BaseAbstractPredicateTransition - + BaseAbstractPredicateTransition precedence int } func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { - - t := new(PrecedencePredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPRECEDENCE - t.precedence = precedence - t.isEpsilon = true - - return t + return &PrecedencePredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPRECEDENCE, + isEpsilon: true, + }, + }, + precedence: precedence, + } } -func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool { return false } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go b/vendor/github.com/antlr4-go/antlr/v4/tree.go similarity index 62% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go rename to vendor/github.com/antlr4-go/antlr/v4/tree.go index 85b4f137b5..c288420fb2 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go +++ b/vendor/github.com/antlr4-go/antlr/v4/tree.go @@ -21,29 +21,23 @@ type Tree interface { type SyntaxTree interface { Tree - - GetSourceInterval() *Interval + GetSourceInterval() Interval } type ParseTree interface { SyntaxTree - Accept(Visitor ParseTreeVisitor) interface{} GetText() string - ToStringTree([]string, Recognizer) string } type RuleNode interface { ParseTree - GetRuleContext() RuleContext - GetBaseRuleContext() *BaseRuleContext } type TerminalNode interface { ParseTree - GetSymbol() Token } @@ -64,12 +58,12 @@ type BaseParseTreeVisitor struct{} var _ ParseTreeVisitor = &BaseParseTreeVisitor{} -func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } -func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } +func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil } -// TODO +// TODO: Implement this? 
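The recurring change in this file (and in token.go earlier) is replacing pointer embedding of the base struct with value embedding initialized through a composite literal. A minimal sketch of the two styles with illustrative types; the new form avoids the extra allocation and the possibility of a nil embedded pointer:

```go
package main

import "fmt"

type base struct {
	kind      int
	isEpsilon bool
}

// Old style: pointer embedding forces a second allocation and a nil check
// anywhere the embedded struct might not have been wired up.
type oldNode struct {
	*base
}

func newOldNode(kind int) *oldNode {
	n := new(oldNode)
	n.base = new(base)
	n.kind = kind
	return n
}

// New style: value embedding, fully initialized in one composite literal.
type newNode struct {
	base
}

func newNewNode(kind int) *newNode {
	return &newNode{base: base{kind: kind, isEpsilon: true}}
}

func main() {
	fmt.Println(newOldNode(1).kind)      // 1
	fmt.Println(newNewNode(2).kind)      // 2
	fmt.Println(newNewNode(2).isEpsilon) // true
}
```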
//func (this ParseTreeVisitor) Visit(ctx) { // if (Utils.isArray(ctx)) { // self := this @@ -101,15 +95,14 @@ type BaseParseTreeListener struct{} var _ ParseTreeListener = &BaseParseTreeListener{} -func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {} -func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {} -func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {} -func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {} +func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode) {} +func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode) {} +func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {} +func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {} type TerminalNodeImpl struct { parentCtx RuleContext - - symbol Token + symbol Token } var _ TerminalNode = &TerminalNodeImpl{} @@ -123,7 +116,7 @@ func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { return tn } -func (t *TerminalNodeImpl) GetChild(i int) Tree { +func (t *TerminalNodeImpl) GetChild(_ int) Tree { return nil } @@ -131,7 +124,7 @@ func (t *TerminalNodeImpl) GetChildren() []Tree { return nil } -func (t *TerminalNodeImpl) SetChildren(tree []Tree) { +func (t *TerminalNodeImpl) SetChildren(_ []Tree) { panic("Cannot set children on terminal node") } @@ -151,7 +144,7 @@ func (t *TerminalNodeImpl) GetPayload() interface{} { return t.symbol } -func (t *TerminalNodeImpl) GetSourceInterval() *Interval { +func (t *TerminalNodeImpl) GetSourceInterval() Interval { if t.symbol == nil { return TreeInvalidInterval } @@ -179,7 +172,7 @@ func (t *TerminalNodeImpl) String() string { return t.symbol.GetText() } -func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string { +func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string { return t.String() } @@ -214,10 +207,9 @@ func NewParseTreeWalker() *ParseTreeWalker { return new(ParseTreeWalker) } -// Performs a walk on the given parse tree starting at the root and going down recursively -// with depth-first search. On each node, EnterRule is called before -// recursively walking down into child nodes, then -// ExitRule is called after the recursive call to wind up. +// Walk performs a walk on the given parse tree starting at the root and going down recursively +// with depth-first search. On each node, [EnterRule] is called before +// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up. 
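To make the enter-before-children, exit-after-children contract of the walker concrete, here is a small standalone walk over a toy tree (plain structs rather than the ANTLR Tree interfaces); the recursive Walk and the new IterativeParseTreeWalker below implement the same ordering against parse trees:

```go
package main

import (
	"fmt"
	"strings"
)

type node struct {
	name     string
	children []*node
}

// walk calls enter before descending and exit after all children returned,
// matching the ParseTreeWalker contract described above.
func walk(n *node, depth int, enter, exit func(*node, int)) {
	enter(n, depth)
	for _, c := range n.children {
		walk(c, depth+1, enter, exit)
	}
	exit(n, depth)
}

func main() {
	tree := &node{name: "expr", children: []*node{
		{name: "term"},
		{name: "op"},
		{name: "term"},
	}}

	enter := func(n *node, d int) { fmt.Println(strings.Repeat("  ", d) + "enter " + n.name) }
	exit := func(n *node, d int) { fmt.Println(strings.Repeat("  ", d) + "exit  " + n.name) }
	walk(tree, 0, enter, exit)
}
```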
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { switch tt := t.(type) { case ErrorNode: @@ -234,7 +226,7 @@ func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { } } -// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} +// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule] // then by triggering the event specific to the given parse tree node func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { ctx := r.GetRuleContext().(ParserRuleContext) @@ -242,12 +234,71 @@ func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { ctx.EnterRule(listener) } -// Exits a grammar rule by first triggering the event specific to the given parse tree node -// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} +// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node +// then by triggering the generic event [ParseTreeListener].ExitEveryRule func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { ctx := r.GetRuleContext().(ParserRuleContext) ctx.ExitRule(listener) listener.ExitEveryRule(ctx) } +//goland:noinspection GoUnusedGlobalVariable var ParseTreeWalkerDefault = NewParseTreeWalker() + +type IterativeParseTreeWalker struct { + *ParseTreeWalker +} + +//goland:noinspection GoUnusedExportedFunction +func NewIterativeParseTreeWalker() *IterativeParseTreeWalker { + return new(IterativeParseTreeWalker) +} + +func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + var stack []Tree + var indexStack []int + currentNode := t + currentIndex := 0 + + for currentNode != nil { + // pre-order visit + switch tt := currentNode.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + i.EnterRule(listener, currentNode.(RuleNode)) + } + // Move down to first child, if exists + if currentNode.GetChildCount() > 0 { + stack = append(stack, currentNode) + indexStack = append(indexStack, currentIndex) + currentIndex = 0 + currentNode = currentNode.GetChild(0) + continue + } + + for { + // post-order visit + if ruleNode, ok := currentNode.(RuleNode); ok { + i.ExitRule(listener, ruleNode) + } + // No parent, so no siblings + if len(stack) == 0 { + currentNode = nil + currentIndex = 0 + break + } + // Move to next sibling if possible + currentIndex++ + if stack[len(stack)-1].GetChildCount() > currentIndex { + currentNode = stack[len(stack)-1].GetChild(currentIndex) + break + } + // No next, sibling, so move up + currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1] + currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1] + } + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go b/vendor/github.com/antlr4-go/antlr/v4/trees.go similarity index 81% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go rename to vendor/github.com/antlr4-go/antlr/v4/trees.go index d7dbb03228..f44c05d811 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go +++ b/vendor/github.com/antlr4-go/antlr/v4/trees.go @@ -8,10 +8,8 @@ import "fmt" /** A set of utility routines useful for all kinds of ANTLR trees. */ -// Print out a whole tree in LISP form. {@link //getNodeText} is used on the -// -// node payloads to get the text for the nodes. Detect -// parse trees and extract data appropriately. 
+// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the +// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately. func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { if recog != nil { @@ -32,7 +30,7 @@ func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { } for i := 1; i < c; i++ { s = TreesStringTree(tree.GetChild(i), ruleNames, nil) - res += (" " + s) + res += " " + s } res += ")" return res @@ -62,7 +60,7 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { } } - // no recog for rule names + // no recognition for rule names payload := t.GetPayload() if p2, ok := payload.(Token); ok { return p2.GetText() @@ -71,7 +69,9 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { return fmt.Sprint(t.GetPayload()) } -// Return ordered list of all children of this node +// TreesGetChildren returns am ordered list of all children of this node +// +//goland:noinspection GoUnusedExportedFunction func TreesGetChildren(t Tree) []Tree { list := make([]Tree, 0) for i := 0; i < t.GetChildCount(); i++ { @@ -80,9 +80,10 @@ func TreesGetChildren(t Tree) []Tree { return list } -// Return a list of all ancestors of this node. The first node of +// TreesgetAncestors returns a list of all ancestors of this node. The first node of list is the root +// and the last node is the parent of this node. // -// list is the root and the last is the parent of this node. +//goland:noinspection GoUnusedExportedFunction func TreesgetAncestors(t Tree) []Tree { ancestors := make([]Tree, 0) t = t.GetParent() @@ -94,10 +95,12 @@ func TreesgetAncestors(t Tree) []Tree { return ancestors } +//goland:noinspection GoUnusedExportedFunction func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { return TreesfindAllNodes(t, ttype, true) } +//goland:noinspection GoUnusedExportedFunction func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { return TreesfindAllNodes(t, ruleIndex, false) } @@ -129,6 +132,7 @@ func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTr } } +//goland:noinspection GoUnusedExportedFunction func TreesDescendants(t ParseTree) []ParseTree { nodes := []ParseTree{t} for i := 0; i < t.GetChildCount(); i++ { diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go b/vendor/github.com/antlr4-go/antlr/v4/utils.go similarity index 74% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go rename to vendor/github.com/antlr4-go/antlr/v4/utils.go index 9fad5d916b..36a37f247a 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go +++ b/vendor/github.com/antlr4-go/antlr/v4/utils.go @@ -9,8 +9,10 @@ import ( "errors" "fmt" "math/bits" + "os" "strconv" "strings" + "syscall" ) func intMin(a, b int) int { @@ -31,7 +33,7 @@ func intMax(a, b int) int { type IntStack []int -var ErrEmptyStack = errors.New("Stack is empty") +var ErrEmptyStack = errors.New("stack is empty") func (s *IntStack) Pop() (int, error) { l := len(*s) - 1 @@ -47,33 +49,13 @@ func (s *IntStack) Push(e int) { *s = append(*s, e) } -type comparable interface { - Equals(other Collectable[any]) bool -} - -func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool { - - return a.Equals(b) -} - -func standardHashFunction(a interface{}) int { - if h, ok := a.(hasher); ok { - return h.Hash() - } - - panic("Not Hasher") -} - -type hasher interface { - Hash() int -} - const bitsPerWord = 64 func 
indexForBit(bit int) int { return bit / bitsPerWord } +//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction func wordForBit(data []uint64, bit int) uint64 { idx := indexForBit(bit) if idx >= len(data) { @@ -94,6 +76,8 @@ type BitSet struct { data []uint64 } +// NewBitSet creates a new bitwise set +// TODO: See if we can replace with the standard library's BitSet func NewBitSet() *BitSet { return &BitSet{} } @@ -123,7 +107,7 @@ func (b *BitSet) or(set *BitSet) { setLen := set.minLen() maxLen := intMax(bLen, setLen) if maxLen > len(b.data) { - // Increase the size of len(b.data) to repesent the bits in both sets. + // Increase the size of len(b.data) to represent the bits in both sets. data := make([]uint64, maxLen) copy(data, b.data) b.data = data @@ -246,37 +230,6 @@ func (a *AltDict) values() []interface{} { return vs } -type DoubleDict struct { - data map[int]map[int]interface{} -} - -func NewDoubleDict() *DoubleDict { - dd := new(DoubleDict) - dd.data = make(map[int]map[int]interface{}) - return dd -} - -func (d *DoubleDict) Get(a, b int) interface{} { - data := d.data[a] - - if data == nil { - return nil - } - - return data[b] -} - -func (d *DoubleDict) set(a, b int, o interface{}) { - data := d.data[a] - - if data == nil { - data = make(map[int]interface{}) - d.data[a] = data - } - - data[b] = o -} - func EscapeWhitespace(s string, escapeSpaces bool) string { s = strings.Replace(s, "\t", "\\t", -1) @@ -288,6 +241,7 @@ func EscapeWhitespace(s string, escapeSpaces bool) string { return s } +//goland:noinspection GoUnusedExportedFunction func TerminalNodeToStringArray(sa []TerminalNode) []string { st := make([]string, len(sa)) @@ -298,6 +252,7 @@ func TerminalNodeToStringArray(sa []TerminalNode) []string { return st } +//goland:noinspection GoUnusedExportedFunction func PrintArrayJavaStyle(sa []string) string { var buffer bytes.Buffer @@ -350,3 +305,77 @@ func murmurFinish(h int, numberOfWords int) int { return int(hash) } + +func isDirectory(dir string) (bool, error) { + fileInfo, err := os.Stat(dir) + if err != nil { + switch { + case errors.Is(err, syscall.ENOENT): + // The given directory does not exist, so we will try to create it + // + err = os.MkdirAll(dir, 0755) + if err != nil { + return false, err + } + + return true, nil + case err != nil: + return false, err + default: + } + } + return fileInfo.IsDir(), err +} + +// intSlicesEqual returns true if the two slices of ints are equal, and is a little +// faster than slices.Equal. 
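The intSlicesEqual helper whose doc comment closes this chunk (its body continues just below) is mostly a plain element comparison, but it adds one fast path worth noting: two slices with the same length and the same first-element address must hold identical data. A standalone sketch of the same shape; the `equalInts` name is illustrative:

```go
package main

import "fmt"

// equalInts mirrors the vendored helper: nil handling, a length check,
// a same-backing-array fast path, then an element-by-element comparison.
func equalInts(s1, s2 []int) bool {
	if s1 == nil && s2 == nil {
		return true
	}
	if s1 == nil || s2 == nil {
		return false
	}
	if len(s1) != len(s2) {
		return false
	}
	if len(s1) == 0 {
		return true
	}
	if &s1[0] == &s2[0] {
		return true // same first-element address and length: identical contents
	}
	for i, v := range s1 {
		if v != s2[i] {
			return false
		}
	}
	return true
}

func main() {
	a := []int{1, 2, 3}
	b := a[:]           // shares a's backing array: hits the fast path
	c := []int{1, 2, 3} // equal contents, different memory

	fmt.Println(equalInts(a, b))     // true
	fmt.Println(equalInts(a, c))     // true
	fmt.Println(equalInts(a, c[:2])) // false: lengths differ
}
```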
+func intSlicesEqual(s1, s2 []int) bool { + if s1 == nil && s2 == nil { + return true + } + if s1 == nil || s2 == nil { + return false + } + if len(s1) == 0 && len(s2) == 0 { + return true + } + + if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) { + return false + } + // If the slices are using the same memory, then they are the same slice + if &s1[0] == &s2[0] { + return true + } + for i, v := range s1 { + if v != s2[i] { + return false + } + } + return true +} + +func pcSliceEqual(s1, s2 []*PredictionContext) bool { + if s1 == nil && s2 == nil { + return true + } + if s1 == nil || s2 == nil { + return false + } + if len(s1) == 0 && len(s2) == 0 { + return true + } + if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) { + return false + } + // If the slices are using the same memory, then they are the same slice + if &s1[0] == &s2[0] { + return true + } + for i, v := range s1 { + if !v.Equals(s2[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index 4b19188d90..8a290f1972 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -1,6 +1,7 @@ package md2man import ( + "bufio" "bytes" "fmt" "io" @@ -21,34 +22,35 @@ type roffRenderer struct { } const ( - titleHeader = ".TH " - topLevelHeader = "\n\n.SH " - secondLevelHdr = "\n.SH " - otherHeader = "\n.SS " - crTag = "\n" - emphTag = "\\fI" - emphCloseTag = "\\fP" - strongTag = "\\fB" - strongCloseTag = "\\fP" - breakTag = "\n.br\n" - paraTag = "\n.PP\n" - hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" - linkTag = "\n\\[la]" - linkCloseTag = "\\[ra]" - codespanTag = "\\fB" - codespanCloseTag = "\\fR" - codeTag = "\n.EX\n" - codeCloseTag = "\n.EE\n" - quoteTag = "\n.PP\n.RS\n" - quoteCloseTag = "\n.RE\n" - listTag = "\n.RS\n" - listCloseTag = "\n.RE\n" - dtTag = "\n.TP\n" - dd2Tag = "\n" - tableStart = "\n.TS\nallbox;\n" - tableEnd = ".TE\n" - tableCellStart = "T{\n" - tableCellEnd = "\nT}\n" + titleHeader = ".TH " + topLevelHeader = "\n\n.SH " + secondLevelHdr = "\n.SH " + otherHeader = "\n.SS " + crTag = "\n" + emphTag = "\\fI" + emphCloseTag = "\\fP" + strongTag = "\\fB" + strongCloseTag = "\\fP" + breakTag = "\n.br\n" + paraTag = "\n.PP\n" + hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" + linkTag = "\n\\[la]" + linkCloseTag = "\\[ra]" + codespanTag = "\\fB" + codespanCloseTag = "\\fR" + codeTag = "\n.EX\n" + codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least as how blackfriday gives us on). + quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = "\n.RE\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}\n" + tablePreprocessor = `'\" t` ) // NewRoffRenderer creates a new blackfriday Renderer for generating roff documents @@ -75,6 +77,16 @@ func (r *roffRenderer) GetExtensions() blackfriday.Extensions { // RenderHeader handles outputting the header at document start func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // We need to walk the tree to check if there are any tables. + // If there are, we need to enable the roff table preprocessor. 
+ ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + if node.Type == blackfriday.Table { + out(w, tablePreprocessor+"\n") + return blackfriday.Terminate + } + return blackfriday.GoToNext + }) + // disable hyphenation out(w, ".nh\n") } @@ -322,6 +334,28 @@ func out(w io.Writer, output string) { } func escapeSpecialChars(w io.Writer, text []byte) { + scanner := bufio.NewScanner(bytes.NewReader(text)) + + // count the number of lines in the text + // we need to know this to avoid adding a newline after the last line + n := bytes.Count(text, []byte{'\n'}) + idx := 0 + + for scanner.Scan() { + dt := scanner.Bytes() + if idx < n { + idx++ + dt = append(dt, '\n') + } + escapeSpecialCharsLine(w, dt) + } + + if err := scanner.Err(); err != nil { + panic(err) + } +} + +func escapeSpecialCharsLine(w io.Writer, text []byte) { for i := 0; i < len(text); i++ { // escape initial apostrophe or period if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 5edd5a7ca9..92b78048e2 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,6 +1,24 @@ # Change history of go-restful -## [v3.11.0] - 2023-08-19 + +## [v3.12.1] - 2024-05-28 + +- fix misroute when dealing multiple webservice with regex (#549) (thanks Haitao Chen) + +## [v3.12.0] - 2024-03-11 + +- add Flush method #529 (#538) +- fix: Improper handling of empty POST requests (#543) + +## [v3.11.3] - 2024-01-09 + +- better not have 2 tags on one commit + +## [v3.11.1, v3.11.2] - 2024-01-09 + +- fix by restoring custom JSON handler functions (Mike Beaumont #540) + +## [v3.12.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 95a05a0894..7234604e47 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -2,7 +2,6 @@ go-restful ========== package for building REST-style Web Services using Google Go -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) [![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go index 1ff239f99f..80adf55fdf 100644 --- a/vendor/github.com/emicklei/go-restful/v3/compress.go +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool { return c.writer.(http.CloseNotifier).CloseNotify() } +// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it. 
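The Flush method declared above (its body opens the next chunk) is an instance of Go's optional-interface pattern: forward the call only if the wrapped writer also implements http.Flusher, otherwise do nothing. A standalone sketch of that pattern with an illustrative `passthroughWriter` wrapper, not the go-restful type itself:

```go
package main

import (
	"fmt"
	"net/http"
)

// passthroughWriter wraps another http.ResponseWriter, for example to compress
// or count bytes, and forwards Flush only when the inner writer supports it.
type passthroughWriter struct {
	http.ResponseWriter
}

func (p passthroughWriter) Flush() {
	if f, ok := p.ResponseWriter.(http.Flusher); ok {
		f.Flush() // the underlying writer supports streaming; push buffered bytes out
	}
	// otherwise: no-op, like the vendored CompressingResponseWriter.Flush
}

func main() {
	handler := func(w http.ResponseWriter, _ *http.Request) {
		pw := passthroughWriter{w}
		fmt.Fprintln(pw, "chunk 1")
		pw.Flush() // safe whether or not w implements http.Flusher
	}
	http.HandleFunc("/stream", handler)
	// http.ListenAndServe(":8080", nil) // left commented; sketch only
}
```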
+func (c *CompressingResponseWriter) Flush() { + flusher, ok := c.writer.(http.Flusher) + if !ok { + // writer doesn't support http.Flusher interface + return + } + flusher.Flush() +} + // Close the underlying compressor func (c *CompressingResponseWriter) Close() error { if c.isCompressorClosed() { diff --git a/vendor/github.com/emicklei/go-restful/v3/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go index ba1fc5d5f1..6fd2bcd5a1 100644 --- a/vendor/github.com/emicklei/go-restful/v3/curly.go +++ b/vendor/github.com/emicklei/go-restful/v3/curly.go @@ -46,10 +46,10 @@ func (c CurlyRouter) SelectRoute( // selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { candidates := make(sortableCurlyRoutes, 0, 8) - for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb) + for _, eachRoute := range ws.routes { + matches, paramCount, staticCount := c.matchesRouteByPathTokens(eachRoute.pathParts, requestTokens, eachRoute.hasCustomVerb) if matches { - candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? + candidates.add(curlyRoute{eachRoute, paramCount, staticCount}) // TODO make sure Routes() return pointers? } } sort.Sort(candidates) @@ -72,7 +72,7 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin return false, 0, 0 } requestToken := requestTokens[i] - if routeHasCustomVerb && hasCustomVerb(routeToken){ + if routeHasCustomVerb && hasCustomVerb(routeToken) { if !isMatchCustomVerb(routeToken, requestToken) { return false, 0, 0 } @@ -129,44 +129,52 @@ func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpReques // detectWebService returns the best matching webService given the list of path tokens. // see also computeWebserviceScore func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { - var best *WebService + var bestWs *WebService score := -1 - for _, each := range webServices { - matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) + for _, eachWS := range webServices { + matches, eachScore := c.computeWebserviceScore(requestTokens, eachWS.pathExpr.tokens) if matches && (eachScore > score) { - best = each + bestWs = eachWS score = eachScore } } - return best + return bestWs } // computeWebserviceScore returns whether tokens match and // the weighted score of the longest matching consecutive tokens from the beginning. 
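The reworked computeWebserviceScore body follows in the next chunk. As a toy rendition of the weighting it implements, not the go-restful code itself: exact path tokens score heaviest, a plain {param} counts one, and a {param:regex} whose pattern matches the request token now earns one extra point, so regex-constrained routes win ties:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// score is a toy version of the curly-router weighting: exact path tokens
// score (len(routeTokens)-i)*10, a {param} scores 1, and a {param:regex}
// whose pattern matches the request token scores 2.
func score(routeTokens, requestTokens []string) (bool, int) {
	if len(routeTokens) > len(requestTokens) {
		return false, 0
	}
	total := 0
	for i, rt := range routeTokens {
		req := requestTokens[i]
		if strings.HasPrefix(rt, "{") {
			if req == "" {
				return false, total
			}
			total++
			if colon := strings.Index(rt, ":"); colon != -1 {
				pattern := strings.TrimSuffix(rt[colon+1:], "}")
				if ok, _ := regexp.MatchString("^"+pattern+"$", req); ok {
					total++ // extra credit for a regex-constrained match
				}
			}
			continue
		}
		if rt != req {
			return false, total
		}
		total += (len(routeTokens) - i) * 10 // static tokens dominate the score
	}
	return true, total
}

func main() {
	req := []string{"users", "42"}
	_, plain := score([]string{"users", "{id}"}, req)
	_, regex := score([]string{"users", "{id:[0-9]+}"}, req)
	fmt.Println(plain, regex) // 21 22: the regex-constrained route wins
}
```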
-func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { - if len(tokens) > len(requestTokens) { +func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens []string) (bool, int) { + if len(routeTokens) > len(requestTokens) { return false, 0 } score := 0 - for i := 0; i < len(tokens); i++ { - each := requestTokens[i] - other := tokens[i] - if len(each) == 0 && len(other) == 0 { + for i := 0; i < len(routeTokens); i++ { + eachRequestToken := requestTokens[i] + eachRouteToken := routeTokens[i] + if len(eachRequestToken) == 0 && len(eachRouteToken) == 0 { score++ continue } - if len(other) > 0 && strings.HasPrefix(other, "{") { + if len(eachRouteToken) > 0 && strings.HasPrefix(eachRouteToken, "{") { // no empty match - if len(each) == 0 { + if len(eachRequestToken) == 0 { return false, score } - score += 1 + score++ + + if colon := strings.Index(eachRouteToken, ":"); colon != -1 { + // match by regex + matchesToken, _ := c.regularMatchesPathToken(eachRouteToken, colon, eachRequestToken) + if matchesToken { + score++ // extra score for regex match + } + } } else { // not a parameter - if each != other { + if eachRequestToken != eachRouteToken { return false, score } - score += (len(tokens) - i) * 10 //fuzzy + score += (len(routeTokens) - i) * 10 //fuzzy } } return true, score diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index 07a0c91e94..a9b3faaa81 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -155,7 +155,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") if (method == http.MethodPost || method == http.MethodPut || - method == http.MethodPatch) && length == "" { + method == http.MethodPatch) && (length == "" || length == "0") { return nil, NewError( http.StatusUnsupportedMediaType, fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), diff --git a/vendor/github.com/fvbommel/sortorder/README.md b/vendor/github.com/fvbommel/sortorder/README.md deleted file mode 100644 index 06779c8852..0000000000 --- a/vendor/github.com/fvbommel/sortorder/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# sortorder [![PkgGoDev](https://pkg.go.dev/badge/github.com/fvbommel/sortorder)](https://pkg.go.dev/github.com/fvbommel/sortorder) - - import "github.com/fvbommel/sortorder" - -Sort orders and comparison functions. - -Case-insensitive sort orders are in the `casefolded` sub-package -because it pulls in the Unicode tables in the standard library, -which can add significantly to the size of binaries. diff --git a/vendor/github.com/fvbommel/sortorder/doc.go b/vendor/github.com/fvbommel/sortorder/doc.go deleted file mode 100644 index a7dd9585d0..0000000000 --- a/vendor/github.com/fvbommel/sortorder/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package sortorder implements sort orders and comparison functions. -// -// Currently, it only implements so-called "natural order", where integers -// embedded in strings are compared by value. 
-package sortorder // import "github.com/fvbommel/sortorder" diff --git a/vendor/github.com/fvbommel/sortorder/natsort.go b/vendor/github.com/fvbommel/sortorder/natsort.go deleted file mode 100644 index e4f15110b8..0000000000 --- a/vendor/github.com/fvbommel/sortorder/natsort.go +++ /dev/null @@ -1,76 +0,0 @@ -package sortorder - -// Natural implements sort.Interface to sort strings in natural order. This -// means that e.g. "abc2" < "abc12". -// -// Non-digit sequences and numbers are compared separately. The former are -// compared bytewise, while digits are compared numerically (except that -// the number of leading zeros is used as a tie-breaker, so e.g. "2" < "02") -// -// Limitation: only ASCII digits (0-9) are considered. -type Natural []string - -func (n Natural) Len() int { return len(n) } -func (n Natural) Swap(i, j int) { n[i], n[j] = n[j], n[i] } -func (n Natural) Less(i, j int) bool { return NaturalLess(n[i], n[j]) } - -func isDigit(b byte) bool { return '0' <= b && b <= '9' } - -// NaturalLess compares two strings using natural ordering. This means that e.g. -// "abc2" < "abc12". -// -// Non-digit sequences and numbers are compared separately. The former are -// compared bytewise, while digits are compared numerically (except that -// the number of leading zeros is used as a tie-breaker, so e.g. "2" < "02") -// -// Limitation: only ASCII digits (0-9) are considered. -func NaturalLess(str1, str2 string) bool { - idx1, idx2 := 0, 0 - for idx1 < len(str1) && idx2 < len(str2) { - c1, c2 := str1[idx1], str2[idx2] - dig1, dig2 := isDigit(c1), isDigit(c2) - switch { - case dig1 != dig2: // Digits before other characters. - return dig1 // True if LHS is a digit, false if the RHS is one. - case !dig1: // && !dig2, because dig1 == dig2 - // UTF-8 compares bytewise-lexicographically, no need to decode - // codepoints. - if c1 != c2 { - return c1 < c2 - } - idx1++ - idx2++ - default: // Digits - // Eat zeros. - for ; idx1 < len(str1) && str1[idx1] == '0'; idx1++ { - } - for ; idx2 < len(str2) && str2[idx2] == '0'; idx2++ { - } - // Eat all digits. - nonZero1, nonZero2 := idx1, idx2 - for ; idx1 < len(str1) && isDigit(str1[idx1]); idx1++ { - } - for ; idx2 < len(str2) && isDigit(str2[idx2]); idx2++ { - } - // If lengths of numbers with non-zero prefix differ, the shorter - // one is less. - if len1, len2 := idx1-nonZero1, idx2-nonZero2; len1 != len2 { - return len1 < len2 - } - // If they're equally long, string comparison is correct. - if nr1, nr2 := str1[nonZero1:idx1], str2[nonZero2:idx2]; nr1 != nr2 { - return nr1 < nr2 - } - // Otherwise, the one with less zeros is less. - // Because everything up to the number is equal, comparing the index - // after the zeros is sufficient. - if nonZero1 != nonZero2 { - return nonZero1 < nonZero2 - } - } - // They're identical so far, so continue comparing. - } - // So far they are identical. At least one is ended. If the other continues, - // it sorts last. 
- return len(str1) < len(str2) -} diff --git a/vendor/github.com/fxamacker/cbor/v2/.gitignore b/vendor/github.com/fxamacker/cbor/v2/.gitignore new file mode 100644 index 0000000000..f1c181ec9c --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml new file mode 100644 index 0000000000..38cb9ae101 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -0,0 +1,104 @@ +# Do not delete linter settings. Linters like gocritic can be enabled on the command line. + +linters-settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + ignore-tests: true + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + gofmt: + simplify: false + goimports: + local-prefixes: github.com/fxamacker/cbor + golint: + min-confidence: 0 + govet: + check-shadowing: true + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + staticcheck: + checks: ["all"] + +linters: + disable-all: true + enable: + - asciicheck + - bidichk + - depguard + - errcheck + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nilerr + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unused + +issues: + # max-issues-per-linter default is 50. Set to 0 to disable limit. + max-issues-per-linter: 0 + # max-same-issues default is 3. Set to 0 to disable limit. 
+ max-same-issues: 0 + + exclude-rules: + - path: decode.go + text: "string ` overflows ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `, ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string ` for type ` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string `cbor: ` has (\\d+) occurrences, make it a constant" diff --git a/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..c794b2b0c6 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +faye.github@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md new file mode 100644 index 0000000000..de0965e12d --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md @@ -0,0 +1,41 @@ +# How to contribute + +You can contribute by using the library, opening issues, or opening pull requests. 
+ +## Bug reports and security vulnerabilities + +Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues). + +To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy). + +Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me. + +## Pull requests + +Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc. + +Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts. + +See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details. + +Pull requests have a greater chance of being approved if: +- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature. +- it has > 97% code coverage. + +## Describe your issue + +Clearly describe the issue: +* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error. +* If you propose a change or addition, try to give an example how the improved code could look like or how to use it. +* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message. + +## Please don't + +Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me. + +Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me. + +## Credits + +- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22. +- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements. 
diff --git a/vendor/github.com/fvbommel/sortorder/LICENSE b/vendor/github.com/fxamacker/cbor/v2/LICENSE similarity index 93% rename from vendor/github.com/fvbommel/sortorder/LICENSE rename to vendor/github.com/fxamacker/cbor/v2/LICENSE index 5c695fb590..eaa8504921 100644 --- a/vendor/github.com/fvbommel/sortorder/LICENSE +++ b/vendor/github.com/fxamacker/cbor/v2/LICENSE @@ -1,17 +1,21 @@ -The MIT License (MIT) -Copyright (c) 2015 Frits van Bommel +MIT License + +Copyright (c) 2019-present Faye Amacker + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md new file mode 100644 index 0000000000..af0a79507e --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -0,0 +1,691 @@ +# CBOR Codec in Go + + + +[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). + +CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. + +`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). + +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. 
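The sequence-decoding functions called out above are easiest to see in a short sketch. Below is a minimal, illustrative loop that drains a tiny CBOR Sequence with `UnmarshalFirst`; the input bytes are made up for this example and are not part of this changeset.

```Go
// Minimal sketch: decoding a CBOR Sequence (RFC 8742) item by item
// with UnmarshalFirst. The input bytes are illustrative.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Two back-to-back data items: the integer 1 and the text string "hi".
	seq := []byte{0x01, 0x62, 0x68, 0x69}

	rest := seq
	for len(rest) > 0 {
		var v interface{}
		var err error
		rest, err = cbor.UnmarshalFirst(rest, &v)
		if err != nil {
			panic(err)
		}
		fmt.Println(v) // 1, then hi
	}
}
```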
+ +## fxamacker/cbor + +[![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) +[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) +[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. + +Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. + +
__Highlights__

+ +__🚀  Speed__ + +Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data. + +__🔒  Security__ + +Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation. + +__🗜️  Data Size__ + +Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. + +__:jigsaw:  Usability__ + +API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines. + +Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc. + +__📆  Extensibility__ + +Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library. + +


+ +
+ +### Secure Decoding with Configurable Settings + +`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. + +By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +
__Example: decoding with encoding/gob 💥 fatal error (out of memory)__

+ +```Go +// Example of encoding/gob having "fatal error: runtime: out of memory" +// while decoding 181 bytes. +package main +import ( + "bytes" + "encoding/gob" + "encoding/hex" + "fmt" +) + +// Example data is from https://github.com/golang/go/issues/24446 +// (shortened to 181 bytes). +const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + + "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + + "860001013001ff860001013001ffb80000001eff850401010e3030303030" + + "30303030303030303001ff3000010c0104000016ffb70201010830303030" + + "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + + "303030303030303030303030303030303030303030303030303030303030" + + "30" + +type X struct { + J *X + K map[string]int +} + +func main() { + raw, _ := hex.DecodeString(data) + decoder := gob.NewDecoder(bytes.NewReader(raw)) + + var x X + decoder.Decode(&x) // fatal error: runtime: out of memory + fmt.Println("Decoding finished.") +} +``` + +
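For contrast, here is a minimal sketch of feeding similar adversarial input to `fxamacker/cbor`. The 10-byte sample is the same one used in the benchmark that follows; the decoder is expected to return an error quickly rather than exhaust memory (the exact error depends on decoder settings).

```Go
// Minimal sketch: fxamacker/cbor rejects the malicious input quickly
// with an error instead of running out of memory.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Same 10-byte sample as the benchmark below: an array header that
	// claims a huge number of elements but provides almost no data.
	malicious := []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}

	var v []byte
	err := cbor.Unmarshal(malicious, &v)
	fmt.Println(err) // an error (limits / truncated data), not an OOM
}
```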


+ +
+ +`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to +decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | + +
__Benchmark details__

+ +Latest comparison used: +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) +- go test -bench=. -benchmem -count=20 + +#### Prior comparisons + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | + +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.6, linux/amd64, i5-13600K (DDR4) +- go test -bench=. -benchmem -count=20 + +


+ +
+ +### Smaller Encodings with Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
__Example: encoding 3-level nested Go struct to 1 byte CBOR__

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
+ +Example using different struct tags together: + +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. + +## Quick Start + +__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. + +### Key Points + +This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). + +- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items. +- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items. + +Configurable limits and options can be used to balance trade-offs. + +- Encoding and decoding modes are created from options (settings). +- Modes can be created at startup and reused. +- Modes are safe for concurrent use. + +### Default Mode + +Package level functions only use this library's default settings. +They provide the "default mode" of encoding and decoding. + +```go +// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc. +b, err = cbor.Marshal(v) // encode v to []byte b +err = cbor.Unmarshal(b, &v) // decode []byte b to v +decoder = cbor.NewDecoder(r) // create decoder with io.Reader r +err = decoder.Decode(&v) // decode a CBOR data item to v + +// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface. +err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool. + +// v2.5.0 added new functions that return remaining bytes. + +// UnmarshalFirst decodes first CBOR data item and returns remaining bytes. +rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v + +// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. +text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text + +// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, +// but new funcs UnmarshalFirst and DiagnoseFirst do not. +``` + +__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. + +- Different CBOR libraries may use different default settings. +- CBOR-based formats or protocols usually require specific settings. + +For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. + +### Presets + +Presets can be used as-is or as a starting point for custom settings. + +```go +// EncOptions is a struct of encoder settings. +func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding +func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization +func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR +func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR +``` + +Presets are used to create custom modes. + +### Custom Modes + +Modes are created from settings. Once created, modes have immutable settings. + +💡 Create the mode at startup and reuse it. It is safe for concurrent use. + +```Go +// Create encoding mode. +opts := cbor.CoreDetEncOptions() // use preset options as a starting point +opts.Time = cbor.TimeUnix // change any settings if needed +em, err := opts.EncMode() // create an immutable encoding mode + +// Reuse the encoding mode. It is safe for concurrent use. + +// API matches encoding/json. 
+b, err := em.Marshal(v) // encode v to []byte b +encoder := em.NewEncoder(w) // create encoder with io.Writer w +err := encoder.Encode(v) // encode v to io.Writer w +``` + +Default mode and custom modes automatically apply struct tags. + +### User Specified Buffer for Encoding (v2.7.0) + +`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool. + +```Go +em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode + +var buf bytes.Buffer +err = em.MarshalToBuffer(v, &buf) // encode v to provided buf +``` + +### Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
__Example: encoding 3-level nested Go struct to 1 byte CBOR__

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +
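The example above only exercises `omitempty`. Below is a minimal sketch of the other two struct tags; the `Point` and `Header` types, their field names, and the expected hex output in the comments are illustrative assumptions, not output copied from the library's documentation.

```Go
// Minimal sketch of the toarray and keyasint struct tags.
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// Encoded as a fixed-length CBOR array instead of a map with text keys.
type Point struct {
	_ struct{} `cbor:",toarray"`
	X int
	Y int
}

// Encoded as a CBOR map with integer keys, as used by COSE/CTAP2-style protocols.
type Header struct {
	Alg int    `cbor:"1,keyasint"`
	Kid []byte `cbor:"4,keyasint,omitempty"`
}

func main() {
	a, _ := cbor.Marshal(Point{X: 1, Y: 2})
	m, _ := cbor.Marshal(Header{Alg: -7})
	fmt.Println(hex.EncodeToString(a)) // expected: 820102 (2-element array)
	fmt.Println(hex.EncodeToString(m)) // expected: a10126 (map {1: -7})
}
```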


+ +
+ +
__Example using several struct tags__

+ +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +

+ +Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. + +### CBOR Tags + +CBOR tags are specified in a `TagSet`. + +Custom modes can be created with a `TagSet` to handle CBOR tags. + +```go +em, err := opts.EncMode() // no CBOR tags +em, err := opts.EncModeWithTags(ts) // immutable CBOR tags +em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags +``` + +`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. + +
__Example using TagSet and TagOptions__

+ +```go +// Use signedCWT struct defined in "Decoding CWT" example. + +// Create TagSet (safe for concurrency). +tags := cbor.NewTagSet() +// Register tag COSE_Sign1 18 with signedCWT type. +tags.Add( + cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired}, + reflect.TypeOf(signedCWT{}), + 18) + +// Create DecMode with immutable tags. +dm, _ := cbor.DecOptions{}.DecModeWithTags(tags) + +// Unmarshal to signedCWT with tag support. +var v signedCWT +if err := dm.Unmarshal(data, &v); err != nil { + return err +} + +// Create EncMode with immutable tags. +em, _ := cbor.EncOptions{}.EncModeWithTags(tags) + +// Marshal signedCWT with tag number. +if data, err := cbor.Marshal(v); err != nil { + return err +} +``` + +

+ +### Functions and Interfaces + +
__Functions and interfaces at a glance__

+
+Common functions with same API as `encoding/json`:
+- `Marshal`, `Unmarshal`
+- `NewEncoder`, `(*Encoder).Encode`
+- `NewDecoder`, `(*Decoder).Decode`
+
+NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes
+because RFC 8949 treats a CBOR data item with remaining bytes as malformed.
+- 💡 Use `UnmarshalFirst` to decode the first CBOR data item and return any remaining bytes.
+
+Other useful functions:
+- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
+- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes.
+- `Wellformed` returns true if the CBOR data item is well-formed.
+
+Interfaces identical or comparable to Go `encoding` packages include:
+`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
+
+The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
+
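`RawMessage` is only mentioned in passing, so here is a minimal sketch of using it to delay decoding of part of a message; the `Envelope` type and its field tags are illustrative.

```Go
// Minimal sketch: cbor.RawMessage defers decoding of a nested value.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// Envelope keeps Payload as raw CBOR so it can be decoded later,
// once Kind is known. Field names and tags are illustrative.
type Envelope struct {
	Kind    string          `cbor:"kind"`
	Payload cbor.RawMessage `cbor:"payload"`
}

func main() {
	b, _ := cbor.Marshal(map[string]interface{}{
		"kind":    "greeting",
		"payload": map[string]string{"text": "hello"},
	})

	var env Envelope
	if err := cbor.Unmarshal(b, &env); err != nil {
		panic(err)
	}

	// Decode the payload only now that we know what it is.
	var p struct {
		Text string `cbor:"text"`
	}
	if err := cbor.Unmarshal(env.Payload, &p); err != nil {
		panic(err)
	}
	fmt.Println(env.Kind, p.Text) // greeting hello
}
```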

+ +### Security Tips + +🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data. + +Default limits may need to be increased for systems handling very large data (e.g. blockchains). + +`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`. + +## Status + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. + +For more details, see [release notes](https://github.com/fxamacker/cbor/releases). + +### Prior Release + +[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. + +v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. + +See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes. + +See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. + + + +## Who uses fxamacker/cbor + +`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others. + +`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope. + +## Standards + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Notable CBOR features include: + +| CBOR Feature | Description | +| :--- | :--- | +| CBOR tags | API supports built-in and user-defined tags. | +| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. | +| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). 
| +| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. | +| Indefinite length data | Option to allow/forbid for encoding and decoding. | +| Well-formedness | Always checked and enforced. | +| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. | +| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). | + +Known limitations are noted in the [Limitations section](#limitations). + +Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps. + +Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data. + +After well-formedness is verified, basic validity errors are handled as follows: + +* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default. +* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys. + +When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future. + +By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined. + +__Click to expand topic:__ + +
+__Duplicate Map Keys__

+
+This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
+
+`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
+
+`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
+
+APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
+
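A minimal sketch of the enforced mode described above; the duplicate-key bytes are hand-written for illustration.

```Go
// Minimal sketch: rejecting duplicate map keys with DupMapKeyEnforcedAPF.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
	if err != nil {
		panic(err)
	}

	// {"a": 1, "a": 2} -- the key "a" appears twice.
	dup := []byte{0xa2, 0x61, 0x61, 0x01, 0x61, 0x61, 0x02}

	var m map[string]int
	err = dm.Unmarshal(dup, &m)
	fmt.Println(err) // a DupMapKeyError reporting the key "a"
}
```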

+ +
+__Tag Validity__

+ +This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799): + +* Inadmissible type for tag content +* Inadmissible value for tag content + +Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways: + +* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type. +* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified. + +Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR. + +For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options). + +
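A minimal sketch of the unknown-tag behavior described above; tag number 42 and the input bytes are arbitrary illustrations.

```Go
// Minimal sketch: an unrecognized tag number decodes to cbor.Tag when
// the destination is an empty interface.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Tag 42 wrapping the text string "hi": d8 2a 62 68 69.
	data := []byte{0xd8, 0x2a, 0x62, 0x68, 0x69}

	var v interface{}
	if err := cbor.Unmarshal(data, &v); err != nil {
		panic(err)
	}

	if tag, ok := v.(cbor.Tag); ok {
		fmt.Println(tag.Number, tag.Content) // 42 hi
	}
}
```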

+
+## Limitations
+
+If any of these limitations prevent you from using this library, please open an issue along with a link to your project.
+
+* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`.
+* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items.
+* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation.
+
+## Fuzzing and Code Coverage
+
+__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release.
+
+__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project.
+
+ +## Versions and API Changes +This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes. + +These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases: +`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`. + +Exclusions from SemVer: +- Newly added API documented as "subject to change". +- Newly added API in the master branch that has never been tagged in non-beta release. +- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions. + +## Code of Conduct + +This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments. + +## Contributing + +Please open an issue before beginning work on a PR. The improvement may have already been considered, etc. + +For more info, see [How to Contribute](CONTRIBUTING.md). + +## Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +For the full text of the Security Policy, see [SECURITY.md](SECURITY.md). + +## Acknowledgements + +Many thanks to all the contributors on this project! + +I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more. + +I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days. + +Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0. + +This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs. + +Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis). + +Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included! + +This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well. + +## License + +Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker). + +fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text. + +
diff --git a/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md new file mode 100644 index 0000000000..9c05146d16 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +If the security vulnerability is already known to the public, then you can open an issue as a bug report. + +To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public. diff --git a/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/vendor/github.com/fxamacker/cbor/v2/bytestring.go new file mode 100644 index 0000000000..823bff12ce --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/bytestring.go @@ -0,0 +1,63 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "errors" +) + +// ByteString represents CBOR byte string (major type 2). ByteString can be used +// when using a Go []byte is not possible or convenient. For example, Go doesn't +// allow []byte as map key, so ByteString can be used to support data formats +// having CBOR map with byte string keys. ByteString can also be used to +// encode invalid UTF-8 string as CBOR byte string. +// See DecOption.MapKeyByteStringMode for more details. +type ByteString string + +// Bytes returns bytes representing ByteString. +func (bs ByteString) Bytes() []byte { + return []byte(bs) +} + +// MarshalCBOR encodes ByteString as CBOR byte string (major type 2). +func (bs ByteString) MarshalCBOR() ([]byte, error) { + e := getEncodeBuffer() + defer putEncodeBuffer(e) + + // Encode length + encodeHead(e, byte(cborTypeByteString), uint64(len(bs))) + + // Encode data + buf := make([]byte, e.Len()+len(bs)) + n := copy(buf, e.Bytes()) + copy(buf[n:], bs) + + return buf, nil +} + +// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString. +// Decoding CBOR null and CBOR undefined sets ByteString to be empty. +func (bs *ByteString) UnmarshalCBOR(data []byte) error { + if bs == nil { + return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and CBOR undefined to ByteString resets data. + // This behavior is similar to decoding CBOR null and CBOR undefined to []byte. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + *bs = "" + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Check if CBOR data type is byte string + if typ := d.nextCBORType(); typ != cborTypeByteString { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()} + } + + b, _ := d.parseByteString() + *bs = ByteString(b) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go new file mode 100644 index 0000000000..ea0f39e24f --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -0,0 +1,363 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +type encodeFuncs struct { + ef encodeFunc + ief isEmptyFunc +} + +var ( + decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType + encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType + encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs + typeInfoCache sync.Map // map[reflect.Type]*typeInfo +) + +type specialType int + +const ( + specialTypeNone specialType = iota + specialTypeUnmarshalerIface + specialTypeEmptyIface + specialTypeIface + specialTypeTag + specialTypeTime +) + +type typeInfo struct { + elemTypeInfo *typeInfo + keyTypeInfo *typeInfo + typ reflect.Type + kind reflect.Kind + nonPtrType reflect.Type + nonPtrKind reflect.Kind + spclType specialType +} + +func newTypeInfo(t reflect.Type) *typeInfo { + tInfo := typeInfo{typ: t, kind: t.Kind()} + + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + k := t.Kind() + + tInfo.nonPtrType = t + tInfo.nonPtrKind = k + + if k == reflect.Interface { + if t.NumMethod() == 0 { + tInfo.spclType = specialTypeEmptyIface + } else { + tInfo.spclType = specialTypeIface + } + } else if t == typeTag { + tInfo.spclType = specialTypeTag + } else if t == typeTime { + tInfo.spclType = specialTypeTime + } else if reflect.PtrTo(t).Implements(typeUnmarshaler) { + tInfo.spclType = specialTypeUnmarshalerIface + } + + switch k { + case reflect.Array, reflect.Slice: + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + case reflect.Map: + tInfo.keyTypeInfo = getTypeInfo(t.Key()) + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + } + + return &tInfo +} + +type decodingStructType struct { + fields fields + fieldIndicesByName map[string]int + err error + toArray bool +} + +// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, +// here's a very basic implementation of an aggregated error. 
+type multierror []error + +func (m multierror) Error() string { + var sb strings.Builder + for i, err := range m { + sb.WriteString(err.Error()) + if i < len(m)-1 { + sb.WriteString(", ") + } + } + return sb.String() +} + +func getDecodingStructType(t reflect.Type) *decodingStructType { + if v, _ := decodingStructTypeCache.Load(t); v != nil { + return v.(*decodingStructType) + } + + flds, structOptions := getFields(t) + + toArray := hasToArrayOption(structOptions) + + var errs []error + for i := 0; i < len(flds); i++ { + if flds[i].keyAsInt { + nameAsInt, numErr := strconv.Atoi(flds[i].name) + if numErr != nil { + errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) + break + } + flds[i].nameAsInt = int64(nameAsInt) + } + + flds[i].typInfo = getTypeInfo(flds[i].typ) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + for i, fld := range flds { + if _, ok := fieldIndicesByName[fld.name]; ok { + errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) + continue + } + fieldIndicesByName[fld.name] = i + } + + var err error + { + var multi multierror + for _, each := range errs { + if each != nil { + multi = append(multi, each) + } + } + if len(multi) == 1 { + err = multi[0] + } else if len(multi) > 1 { + err = multi + } + } + + structType := &decodingStructType{ + fields: flds, + fieldIndicesByName: fieldIndicesByName, + err: err, + toArray: toArray, + } + decodingStructTypeCache.Store(t, structType) + return structType +} + +type encodingStructType struct { + fields fields + bytewiseFields fields + lengthFirstFields fields + omitEmptyFieldsIdx []int + err error + toArray bool +} + +func (st *encodingStructType) getFields(em *encMode) fields { + switch em.sort { + case SortNone, SortFastShuffle: + return st.fields + case SortLengthFirst: + return st.lengthFirstFields + default: + return st.bytewiseFields + } +} + +type bytewiseFieldSorter struct { + fields fields +} + +func (x *bytewiseFieldSorter) Len() int { + return len(x.fields) +} + +func (x *bytewiseFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *bytewiseFieldSorter) Less(i, j int) bool { + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +type lengthFirstFieldSorter struct { + fields fields +} + +func (x *lengthFirstFieldSorter) Len() int { + return len(x.fields) +} + +func (x *lengthFirstFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *lengthFirstFieldSorter) Less(i, j int) bool { + if len(x.fields[i].cborName) != len(x.fields[j].cborName) { + return len(x.fields[i].cborName) < len(x.fields[j].cborName) + } + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { + if v, _ := encodingStructTypeCache.Load(t); v != nil { + structType := v.(*encodingStructType) + return structType, structType.err + } + + flds, structOptions := getFields(t) + + if hasToArrayOption(structOptions) { + return getEncodingStructToArrayType(t, flds) + } + + var err error + var hasKeyAsInt bool + var hasKeyAsStr bool + var omitEmptyIdx []int + e := getEncodeBuffer() + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + err = &UnsupportedTypeError{t} + break + } + + // Encode field name + if flds[i].keyAsInt { + nameAsInt, numErr := 
strconv.Atoi(flds[i].name) + if numErr != nil { + err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") + break + } + flds[i].nameAsInt = int64(nameAsInt) + if nameAsInt >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + } else { + n := nameAsInt*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + } + flds[i].cborName = make([]byte, e.Len()) + copy(flds[i].cborName, e.Bytes()) + e.Reset() + + hasKeyAsInt = true + } else { + encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) + flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) + n := copy(flds[i].cborName, e.Bytes()) + copy(flds[i].cborName[n:], flds[i].name) + e.Reset() + + // If cborName contains a text string, then cborNameByteString contains a + // string that has the byte string major type but is otherwise identical to + // cborName. + flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) + copy(flds[i].cborNameByteString, flds[i].cborName) + // Reset encoded CBOR type to byte string, preserving the "additional + // information" bits: + flds[i].cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(flds[i].cborNameByteString[0]) + + hasKeyAsStr = true + } + + // Check if field can be omitted when empty + if flds[i].omitEmpty { + omitEmptyIdx = append(omitEmptyIdx, i) + } + } + putEncodeBuffer(e) + + if err != nil { + structType := &encodingStructType{err: err} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + + // Sort fields by canonical order + bytewiseFields := make(fields, len(flds)) + copy(bytewiseFields, flds) + sort.Sort(&bytewiseFieldSorter{bytewiseFields}) + + lengthFirstFields := bytewiseFields + if hasKeyAsInt && hasKeyAsStr { + lengthFirstFields = make(fields, len(flds)) + copy(lengthFirstFields, flds) + sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) + } + + structType := &encodingStructType{ + fields: flds, + bytewiseFields: bytewiseFields, + lengthFirstFields: lengthFirstFields, + omitEmptyFieldsIdx: omitEmptyIdx, + } + + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + } + + structType := &encodingStructType{ + fields: flds, + toArray: true, + } + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) { + if v, _ := encodeFuncCache.Load(t); v != nil { + fs := v.(encodeFuncs) + return fs.ef, fs.ief + } + ef, ief := getEncodeFuncInternal(t) + encodeFuncCache.Store(t, encodeFuncs{ef, ief}) + return ef, ief +} + +func getTypeInfo(t reflect.Type) *typeInfo { + if v, _ := typeInfoCache.Load(t); v != nil { + return v.(*typeInfo) + } + tInfo := newTypeInfo(t) + typeInfoCache.Store(t, tInfo) + return tInfo +} + +func hasToArrayOption(tag string) bool { + s := ",toarray" + idx := strings.Index(tag, s) + return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',') +} diff --git a/vendor/github.com/fxamacker/cbor/v2/common.go b/vendor/github.com/fxamacker/cbor/v2/common.go new file mode 100644 index 
0000000000..ec038a49ec --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/common.go @@ -0,0 +1,182 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "fmt" + "strconv" +) + +type cborType uint8 + +const ( + cborTypePositiveInt cborType = 0x00 + cborTypeNegativeInt cborType = 0x20 + cborTypeByteString cborType = 0x40 + cborTypeTextString cborType = 0x60 + cborTypeArray cborType = 0x80 + cborTypeMap cborType = 0xa0 + cborTypeTag cborType = 0xc0 + cborTypePrimitives cborType = 0xe0 +) + +func (t cborType) String() string { + switch t { + case cborTypePositiveInt: + return "positive integer" + case cborTypeNegativeInt: + return "negative integer" + case cborTypeByteString: + return "byte string" + case cborTypeTextString: + return "UTF-8 text string" + case cborTypeArray: + return "array" + case cborTypeMap: + return "map" + case cborTypeTag: + return "tag" + case cborTypePrimitives: + return "primitives" + default: + return "Invalid type " + strconv.Itoa(int(t)) + } +} + +type additionalInformation uint8 + +const ( + maxAdditionalInformationWithoutArgument = 23 + additionalInformationWith1ByteArgument = 24 + additionalInformationWith2ByteArgument = 25 + additionalInformationWith4ByteArgument = 26 + additionalInformationWith8ByteArgument = 27 + + // For major type 7. + additionalInformationAsFalse = 20 + additionalInformationAsTrue = 21 + additionalInformationAsNull = 22 + additionalInformationAsUndefined = 23 + additionalInformationAsFloat16 = 25 + additionalInformationAsFloat32 = 26 + additionalInformationAsFloat64 = 27 + + // For major type 2, 3, 4, 5. + additionalInformationAsIndefiniteLengthFlag = 31 +) + +const ( + maxSimpleValueInAdditionalInformation = 23 + minSimpleValueIn1ByteArgument = 32 +) + +func (ai additionalInformation) isIndefiniteLength() bool { + return ai == additionalInformationAsIndefiniteLengthFlag +} + +const ( + // From RFC 8949 Section 3: + // "The initial byte of each encoded data item contains both information about the major type + // (the high-order 3 bits, described in Section 3.1) and additional information + // (the low-order 5 bits)." + + // typeMask is used to extract major type in initial byte of encoded data item. + typeMask = 0xe0 + + // additionalInformationMask is used to extract additional information in initial byte of encoded data item. 
+ additionalInformationMask = 0x1f +) + +func getType(raw byte) cborType { + return cborType(raw & typeMask) +} + +func getAdditionalInformation(raw byte) byte { + return raw & additionalInformationMask +} + +func isBreakFlag(raw byte) bool { + return raw == cborBreakFlag +} + +func parseInitialByte(b byte) (t cborType, ai byte) { + return getType(b), getAdditionalInformation(b) +} + +const ( + tagNumRFC3339Time = 0 + tagNumEpochTime = 1 + tagNumUnsignedBignum = 2 + tagNumNegativeBignum = 3 + tagNumExpectedLaterEncodingBase64URL = 21 + tagNumExpectedLaterEncodingBase64 = 22 + tagNumExpectedLaterEncodingBase16 = 23 + tagNumSelfDescribedCBOR = 55799 +) + +const ( + cborBreakFlag = byte(0xff) + cborByteStringWithIndefiniteLengthHead = byte(0x5f) + cborTextStringWithIndefiniteLengthHead = byte(0x7f) + cborArrayWithIndefiniteLengthHead = byte(0x9f) + cborMapWithIndefiniteLengthHead = byte(0xbf) +) + +var ( + cborFalse = []byte{0xf4} + cborTrue = []byte{0xf5} + cborNil = []byte{0xf6} + cborNaN = []byte{0xf9, 0x7e, 0x00} + cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00} + cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00} +) + +// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types. +func validBuiltinTag(tagNum uint64, contentHead byte) error { + t := getType(contentHead) + switch tagNum { + case tagNumRFC3339Time: + // Tag content (date/time text string in RFC 3339 format) must be string type. + if t != cborTypeTextString { + return newInadmissibleTagContentTypeError( + tagNumRFC3339Time, + "text string", + t.String()) + } + return nil + + case tagNumEpochTime: + // Tag content (epoch date/time) must be uint, int, or float type. + if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) { + return newInadmissibleTagContentTypeError( + tagNumEpochTime, + "integer or floating-point number", + t.String()) + } + return nil + + case tagNumUnsignedBignum, tagNumNegativeBignum: + // Tag content (bignum) must be byte type. + if t != cborTypeByteString { + return newInadmissibleTagContentTypeErrorf( + fmt.Sprintf( + "tag number %d or %d must be followed by byte string, got %s", + tagNumUnsignedBignum, + tagNumNegativeBignum, + t.String(), + )) + } + return nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // From RFC 8949 3.4.5.2: + // The data item tagged can be a byte string or any other data item. In the latter + // case, the tag applies to all of the byte string data items contained in the data + // item, except for those contained in a nested data item tagged with an expected + // conversion. + return nil + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go new file mode 100644 index 0000000000..85842ac736 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -0,0 +1,3187 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/x448/float16" +) + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using default decoding options. If v is nil, not a pointer, or +// a nil pointer, Unmarshal returns an error. 
+// +// To unmarshal CBOR into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalCBOR method with a valid +// CBOR value. +// +// To unmarshal CBOR byte string into a value implementing the +// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's +// UnmarshalBinary method with decoded CBOR byte string. +// +// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil +// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal +// unmarshals CBOR into the value pointed to by the pointer. If the +// pointer is nil, Unmarshal creates a new value for it to point to. +// +// To unmarshal CBOR into an empty interface value, Unmarshal uses the +// following rules: +// +// CBOR booleans decode to bool. +// CBOR positive integers decode to uint64. +// CBOR negative integers decode to int64 (big.Int if value overflows). +// CBOR floating points decode to float64. +// CBOR byte strings decode to []byte. +// CBOR text strings decode to string. +// CBOR arrays decode to []interface{}. +// CBOR maps decode to map[interface{}]interface{}. +// CBOR null and undefined values decode to nil. +// CBOR times (tag 0 and 1) decode to time.Time. +// CBOR bignums (tag 2 and 3) decode to big.Int. +// CBOR tags with an unrecognized number decode to cbor.Tag +// +// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice +// if the CBOR array is empty or slice capacity is less than CBOR array length. +// Otherwise Unmarshal overwrites existing elements, and sets slice length +// to CBOR array length. +// +// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array +// elements into Go array elements. If the Go array is smaller than the +// CBOR array, the extra CBOR array elements are discarded. If the CBOR +// array is smaller than the Go array, the extra Go array elements are +// set to zero values. +// +// To unmarshal a CBOR array into a struct, struct must have a special field "_" +// with struct tag `cbor:",toarray"`. Go array elements are decoded into struct +// fields. Any "omitempty" struct field tag option is ignored in this case. +// +// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the +// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing +// entries. Unmarshal stores key-value pairs from the CBOR map into Go map. +// See DecOptions.DupMapKey to enable duplicate map key detection. +// +// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the +// keys in the following priority: +// +// 1. "cbor" key in struct field tag, +// 2. "json" key in struct field tag, +// 3. struct field name. +// +// Unmarshal tries an exact match for field name, then a case-insensitive match. +// Map key-value pairs without corresponding struct fields are ignored. See +// DecOptions.ExtraReturnErrors to return error at unknown field. +// +// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses text +// string formatted in RFC3339. To unmarshal a CBOR integer/float into a +// time.Time value, Unmarshal creates an unix time with integer/float as seconds +// and fractional seconds since January 1, 1970 UTC. As a special case, Infinite +// and NaN float values decode to time.Time's zero value. +// +// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a +// slice/map/pointer, Unmarshal sets Go value to nil. 
Because null is often +// used to mean "not present", unmarshalling CBOR null and undefined value +// into any other Go type has no effect and returns no error. +// +// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time), +// and tag 2 and 3 (bignum). +// +// Unmarshal returns ExtraneousDataError error (without decoding into v) +// if there are any remaining bytes following the first valid CBOR data item. +// See UnmarshalFirst, if you want to unmarshal only the first +// CBOR data item without ExtraneousDataError caused by remaining bytes. +func Unmarshal(data []byte, v interface{}) error { + return defaultDecMode.Unmarshal(data, v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using default decoding options. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + return defaultDecMode.UnmarshalFirst(data, v) +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func Valid(data []byte) error { + return defaultDecMode.Valid(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func Wellformed(data []byte) error { + return defaultDecMode.Wellformed(data) +} + +// Unmarshaler is the interface implemented by types that wish to unmarshal +// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR +// must copy the CBOR data if it needs to use it after returning. +type Unmarshaler interface { + UnmarshalCBOR([]byte) error +} + +// InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +type InvalidUnmarshalError struct { + s string +} + +func (e *InvalidUnmarshalError) Error() string { + return e.s +} + +// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type. +type UnmarshalTypeError struct { + CBORType string // type of CBOR value + GoType string // type of Go value it could not be decoded into + StructFieldName string // name of the struct field holding the Go value (optional) + errorMsg string // additional error message (optional) +} + +func (e *UnmarshalTypeError) Error() string { + var s string + if e.StructFieldName != "" { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType + } else { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType + } + if e.errorMsg != "" { + s += " (" + e.errorMsg + ")" + } + return s +} + +// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map. +// For example, Go doesn't allow slice as map key. 
+type InvalidMapKeyTypeError struct { + GoType string +} + +func (e *InvalidMapKeyTypeError) Error() string { + return "cbor: invalid map key type: " + e.GoType +} + +// DupMapKeyError describes detected duplicate map key in CBOR map. +type DupMapKeyError struct { + Key interface{} + Index int +} + +func (e *DupMapKeyError) Error() string { + return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index) +} + +// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct. +type UnknownFieldError struct { + Index int +} + +func (e *UnknownFieldError) Error() string { + return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index) +} + +// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item +// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as +// described in RFC 8949 Section 5 Paragraph 3). +type UnacceptableDataItemError struct { + CBORType string + Message string +} + +func (e UnacceptableDataItemError) Error() string { + return fmt.Sprintf("cbor: data item of cbor type %s is not accepted by protocol: %s", e.CBORType, e.Message) +} + +// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when +// using non-default ByteStringExpectedFormat decoding option that makes decoder expect +// a specified format such as base64, hex, etc. +type ByteStringExpectedFormatError struct { + expectedFormatOption ByteStringExpectedFormatMode + err error +} + +func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError { + return &ByteStringExpectedFormatError{expectedFormatOption, err} +} + +func (e *ByteStringExpectedFormatError) Error() string { + switch e.expectedFormatOption { + case ByteStringExpectedBase64URL: + return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err) + + case ByteStringExpectedBase64: + return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err) + + case ByteStringExpectedBase16: + return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err) + + default: + return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err) + } +} + +func (e *ByteStringExpectedFormatError) Unwrap() error { + return e.err +} + +// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags +// fails because of inadmissible type for tag content. Currently, the built-in +// CBOR tags in this codec are tags 0-3 and 21-23. +// See "Tag validity" in RFC 8949 Section 5.3.2. 
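+//
+// For example (illustrative sketch), tag 2 (unsigned bignum) must enclose a
+// byte string, so decoding 0xc2, 0x01 (tag 2 enclosing the integer 1) fails:
+//
+//	var n big.Int
+//	err := Unmarshal([]byte{0xc2, 0x01}, &n) // returns *InadmissibleTagContentTypeError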
+type InadmissibleTagContentTypeError struct { + s string + tagNum int + expectedTagContentType string + gotTagContentType string +} + +func newInadmissibleTagContentTypeError( + tagNum int, + expectedTagContentType string, + gotTagContentType string, +) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{ + tagNum: tagNum, + expectedTagContentType: expectedTagContentType, + gotTagContentType: gotTagContentType, + } +} + +func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor" +} + +func (e *InadmissibleTagContentTypeError) Error() string { + if e.s == "" { + return fmt.Sprintf( + "cbor: tag number %d must be followed by %s, got %s", + e.tagNum, + e.expectedTagContentType, + e.gotTagContentType, + ) + } + return e.s +} + +// DupMapKeyMode specifies how to enforce duplicate map key. Two map keys are considered duplicates if: +// 1. When decoding into a struct, both keys match the same struct field. The keys are also +// considered duplicates if neither matches any field and decoding to interface{} would produce +// equal (==) values for both keys. +// 2. When decoding into a map, both keys are equal (==) when decoded into values of the +// destination map's key type. +type DupMapKeyMode int + +const ( + // DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error) + // uses faster of "keep first" or "keep last" depending on Go data type and other factors. + DupMapKeyQuiet DupMapKeyMode = iota + + // DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys. + // APF means "Allow Partial Fill" and the destination map or struct can be partially filled. + // If a duplicate map key is detected, DupMapKeyError is returned without further decoding + // of the map. It's the caller's responsibility to respond to DupMapKeyError by + // discarding the partially filled result if their protocol requires it. + // WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use. + DupMapKeyEnforcedAPF + + maxDupMapKeyMode +) + +func (dmkm DupMapKeyMode) valid() bool { + return dmkm >= 0 && dmkm < maxDupMapKeyMode +} + +// IndefLengthMode specifies whether to allow indefinite length items. +type IndefLengthMode int + +const ( + // IndefLengthAllowed allows indefinite length items. + IndefLengthAllowed IndefLengthMode = iota + + // IndefLengthForbidden disallows indefinite length items. + IndefLengthForbidden + + maxIndefLengthMode +) + +func (m IndefLengthMode) valid() bool { + return m >= 0 && m < maxIndefLengthMode +} + +// TagsMode specifies whether to allow CBOR tags. +type TagsMode int + +const ( + // TagsAllowed allows CBOR tags. + TagsAllowed TagsMode = iota + + // TagsForbidden disallows CBOR tags. + TagsForbidden + + maxTagsMode +) + +func (tm TagsMode) valid() bool { + return tm >= 0 && tm < maxTagsMode +} + +// IntDecMode specifies which Go type (int64, uint64, or big.Int) should +// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}. +type IntDecMode int + +const ( + // IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}. 
+ // It decodes CBOR unsigned integer (major type 0) to: + // - uint64 + // It decodes CBOR negative integer (major type 1) to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64 + IntDecConvertNone IntDecMode = iota + + // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It decodes CBOR integers (major type 0 and 1) to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64 + // - return UnmarshalTypeError if value > math.MaxInt64 + // Deprecated: IntDecConvertSigned should not be used. + // Please use other options, such as IntDecConvertSignedOrError, IntDecConvertSignedOrBigInt, IntDecConvertNone. + IntDecConvertSigned + + // IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It decodes CBOR integers (major type 0 and 1) to: + // - int64 if value fits + // - return UnmarshalTypeError if value doesn't fit into int64 + IntDecConvertSignedOrFail + + // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It makes CBOR integers (major type 0 and 1) decode to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64 + IntDecConvertSignedOrBigInt + + maxIntDec +) + +func (idm IntDecMode) valid() bool { + return idm >= 0 && idm < maxIntDec +} + +// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2) +// as Go map key when decoding CBOR map key into an empty Go interface value. +// Specifically, this option applies when decoding CBOR map into +// - Go empty interface, or +// - Go map with empty interface as key type. +// The CBOR map key types handled by this option are +// - byte string +// - tagged byte string +// - nested tagged byte string +type MapKeyByteStringMode int + +const ( + // MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key. + // Since Go doesn't allow []byte as map key, CBOR byte string is decoded to + // ByteString which has underlying string type. + // This is the default setting. + MapKeyByteStringAllowed MapKeyByteStringMode = iota + + // MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key. + // Attempting to decode CBOR byte string as map key into empty interface value + // returns a decoding error. + MapKeyByteStringForbidden + + maxMapKeyByteStringMode +) + +func (mkbsm MapKeyByteStringMode) valid() bool { + return mkbsm >= 0 && mkbsm < maxMapKeyByteStringMode +} + +// ExtraDecErrorCond specifies extra conditions that should be treated as errors. +type ExtraDecErrorCond uint + +// ExtraDecErrorNone indicates no extra error condition. +const ExtraDecErrorNone ExtraDecErrorCond = 0 + +const ( + // ExtraDecErrorUnknownField indicates error condition when destination + // Go struct doesn't have a field matching a CBOR map key. + ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota + + maxExtraDecError +) + +func (ec ExtraDecErrorCond) valid() bool { + return ec < maxExtraDecError +} + +// UTF8Mode option specifies if decoder should +// decode CBOR Text containing invalid UTF-8 string. +type UTF8Mode int + +const ( + // UTF8RejectInvalid rejects CBOR Text containing + // invalid UTF-8 string. + UTF8RejectInvalid UTF8Mode = iota + + // UTF8DecodeInvalid allows decoding CBOR Text containing + // invalid UTF-8 string. 
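+	//
+	// For example (illustrative sketch), a decoding mode built with
+	//
+	//	dm, _ := DecOptions{UTF8: UTF8DecodeInvalid}.DecMode()
+	//
+	// accepts 0x61, 0xfe (a 1-byte text string whose payload is not valid
+	// UTF-8), which the default mode rejects.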
+ UTF8DecodeInvalid + + maxUTF8Mode +) + +func (um UTF8Mode) valid() bool { + return um >= 0 && um < maxUTF8Mode +} + +// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names. +type FieldNameMatchingMode int + +const ( + // FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag + // names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose + // name is a case-insensitive match for the item's key. + FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota + + // FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an + // exact match for the item's key. + FieldNameMatchingCaseSensitive + + maxFieldNameMatchingMode +) + +func (fnmm FieldNameMatchingMode) valid() bool { + return fnmm >= 0 && fnmm < maxFieldNameMatchingMode +} + +// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}. +type BigIntDecMode int + +const ( + // BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int) + // when unmarshalling into a Go interface{}. + BigIntDecodeValue BigIntDecMode = iota + + // BigIntDecodePointer makes CBOR bignum decode to *big.Int when + // unmarshalling into a Go interface{}. + BigIntDecodePointer + + maxBigIntDecMode +) + +func (bidm BigIntDecMode) valid() bool { + return bidm >= 0 && bidm < maxBigIntDecMode +} + +// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string. +type ByteStringToStringMode int + +const ( + // ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string. + ByteStringToStringForbidden ByteStringToStringMode = iota + + // ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string. + ByteStringToStringAllowed + + // ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string + // into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of + // the "expected later encoding" tags (numbers 21 through 23), the destination string will + // be populated by applying the designated text encoding to the contents of the input byte + // string. + ByteStringToStringAllowedWithExpectedLaterEncoding + + maxByteStringToStringMode +) + +func (bstsm ByteStringToStringMode) valid() bool { + return bstsm >= 0 && bstsm < maxByteStringToStringMode +} + +// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name. +type FieldNameByteStringMode int + +const ( + // FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name. + FieldNameByteStringForbidden FieldNameByteStringMode = iota + + // FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names. + FieldNameByteStringAllowed + + maxFieldNameByteStringMode +) + +func (fnbsm FieldNameByteStringMode) valid() bool { + return fnbsm >= 0 && fnbsm < maxFieldNameByteStringMode +} + +// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any). +// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. +type UnrecognizedTagToAnyMode int + +const ( + // UnrecognizedTagNumAndContentToAny decodes CBOR tag number and tag content to cbor.Tag + // when decoding unrecognized CBOR tag into an empty interface. 
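+	//
+	// For example (illustrative sketch), decoding 0xd8, 0x64, 0x01 (tag 100
+	// enclosing the integer 1) into an empty interface yields
+	// Tag{Number: 100, Content: uint64(1)} in this mode, and just uint64(1)
+	// with UnrecognizedTagContentToAny.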
+ UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota + + // UnrecognizedTagContentToAny decodes only CBOR tag content (into its default type) + // when decoding unrecognized CBOR tag into an empty interface. + UnrecognizedTagContentToAny + + maxUnrecognizedTagToAny +) + +func (uttam UnrecognizedTagToAnyMode) valid() bool { + return uttam >= 0 && uttam < maxUnrecognizedTagToAny +} + +// TimeTagToAnyMode specifies how to decode CBOR tag 0 and 1 into an empty interface (any). +// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. +type TimeTagToAnyMode int + +const ( + // TimeTagToTime decodes CBOR tag 0 and 1 into a time.Time value + // when decoding tag 0 or 1 into an empty interface. + TimeTagToTime TimeTagToAnyMode = iota + + // TimeTagToRFC3339 decodes CBOR tag 0 and 1 into a time string in RFC3339 format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339 + + // TimeTagToRFC3339Nano decodes CBOR tag 0 and 1 into a time string in RFC3339Nano format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339Nano + + maxTimeTagToAnyMode +) + +func (tttam TimeTagToAnyMode) valid() bool { + return tttam >= 0 && tttam < maxTimeTagToAnyMode +} + +// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value +// number (0...23 and 32...255). +type SimpleValueRegistry struct { + rejected [256]bool +} + +// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is +// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned. +func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error { + return func(r *SimpleValueRegistry) error { + if sv >= 24 && sv <= 31 { + return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv) + } + r.rejected[sv] = true + return nil + } +} + +// Creates a new SimpleValueRegistry. The registry state is initialized by executing the provided +// functions in order against a registry that is pre-populated with the defaults for all well-formed +// simple value numbers. +func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) { + var r SimpleValueRegistry + for _, fn := range fns { + if err := fn(&r); err != nil { + return nil, err + } + } + return &r, nil +} + +// NaNMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing NaN (not-a-number). +type NaNMode int + +const ( + // NaNDecodeAllowed will decode NaN values to Go float32 or float64. + NaNDecodeAllowed NaNMode = iota + + // NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value. + NaNDecodeForbidden + + maxNaNDecode +) + +func (ndm NaNMode) valid() bool { + return ndm >= 0 && ndm < maxNaNDecode +} + +// InfMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing positive or negative infinity. +type InfMode int + +const ( + // InfDecodeAllowed will decode infinite values to Go float32 or float64. + InfDecodeAllowed InfMode = iota + + // InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an + // infinite value. 
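+	//
+	// For example (illustrative sketch), a mode built with
+	// DecOptions{Inf: InfDecodeForbidden}.DecMode() rejects 0xf9, 0x7c, 0x00
+	// (the float16 encoding of +Inf) with an UnacceptableDataItemError.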
+ InfDecodeForbidden + + maxInfDecode +) + +func (idm InfMode) valid() bool { + return idm >= 0 && idm < maxInfDecode +} + +// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time. +type ByteStringToTimeMode int + +const ( + // ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time. + ByteStringToTimeForbidden ByteStringToTimeMode = iota + + // ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time. + ByteStringToTimeAllowed + + maxByteStringToTimeMode +) + +func (bttm ByteStringToTimeMode) valid() bool { + return bttm >= 0 && bttm < maxByteStringToTimeMode +} + +// ByteStringExpectedFormatMode specifies how to decode CBOR byte string into Go byte slice +// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if +// the CBOR byte string does not contain the expected format (e.g. base64) specified. +// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" +// in RFC 8949 Section 3.4.5.2. +type ByteStringExpectedFormatMode int + +const ( + // ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice + // if the byte string is not tagged by CBOR tag 21-23. + ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota + + // ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64url-encoded bytes into Go slice. + ByteStringExpectedBase64URL + + // ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64-encoded bytes into Go slice. + ByteStringExpectedBase64 + + // ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base16-encoded bytes into Go slice. + ByteStringExpectedBase16 + + maxByteStringExpectedFormatMode +) + +func (bsefm ByteStringExpectedFormatMode) valid() bool { + return bsefm >= 0 && bsefm < maxByteStringExpectedFormatMode +} + +// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be +// decoded. +type BignumTagMode int + +const ( + // BignumTagAllowed allows bignum tags to be decoded. + BignumTagAllowed BignumTagMode = iota + + // BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag + // is encountered in the input. + BignumTagForbidden + + maxBignumTag +) + +func (btm BignumTagMode) valid() bool { + return btm >= 0 && btm < maxBignumTag +} + +// BinaryUnmarshalerMode specifies how to decode into types that implement +// encoding.BinaryUnmarshaler. +type BinaryUnmarshalerMode int + +const ( + // BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte + // string when decoding into a value that implements BinaryUnmarshaler. + BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota + + // BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode. + BinaryUnmarshalerNone + + maxBinaryUnmarshalerMode +) + +func (bum BinaryUnmarshalerMode) valid() bool { + return bum >= 0 && bum < maxBinaryUnmarshalerMode +} + +// DecOptions specifies decoding options. +type DecOptions struct { + // DupMapKey specifies whether to enforce duplicate map key. 
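+	//
+	// For example (illustrative sketch), with DupMapKey: DupMapKeyEnforcedAPF,
+	// decoding 0xa2, 0x61, 0x61, 0x01, 0x61, 0x61, 0x02 (the map {"a": 1, "a": 2})
+	// returns a *DupMapKeyError instead of quietly keeping one of the entries.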
+ DupMapKey DupMapKeyMode + + // TimeTag specifies whether or not untagged data items, or tags other + // than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1 + // appears in an input, the type of its content is always validated as + // specified in RFC 8949. That behavior is not controlled by this + // option. The behavior of the supported modes are: + // + // DecTagIgnored (default): Untagged text strings and text strings + // enclosed in tags other than 0 and 1 are decoded as though enclosed + // in tag 0. Untagged unsigned integers, negative integers, and + // floating-point numbers (or those enclosed in tags other than 0 and + // 1) are decoded as though enclosed in tag 1. Decoding a tag other + // than 0 or 1 enclosing simple values null or undefined into a + // time.Time does not modify the destination value. + // + // DecTagOptional: Untagged text strings are decoded as though + // enclosed in tag 0. Untagged unsigned integers, negative integers, + // and floating-point numbers are decoded as though enclosed in tag + // 1. Tags other than 0 and 1 will produce an error on attempts to + // decode them into a time.Time. + // + // DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any + // other input will produce an error. + TimeTag DecTagMode + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // IntDec specifies which Go integer type (int64 or uint64) to use + // when decoding CBOR int (major type 0 and 1) to Go interface{}. + IntDec IntDecMode + + // MapKeyByteString specifies how to decode CBOR byte string as map key + // when decoding CBOR map with byte string key into an empty interface value. + // By default, an error is returned when attempting to decode CBOR byte string + // as map key because Go doesn't allow []byte as map key. + MapKeyByteString MapKeyByteStringMode + + // ExtraReturnErrors specifies extra conditions that should be treated as errors. + ExtraReturnErrors ExtraDecErrorCond + + // DefaultMapType specifies Go map type to create and decode to + // when unmarshalling CBOR into an empty interface value. + // By default, unmarshal uses map[interface{}]interface{}. + DefaultMapType reflect.Type + + // UTF8 specifies if decoder should decode CBOR Text containing invalid UTF-8. + // By default, unmarshal rejects CBOR text containing invalid UTF-8. + UTF8 UTF8Mode + + // FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names. + FieldNameMatching FieldNameMatchingMode + + // BigIntDec specifies how to decode CBOR bignum to Go interface{}. + BigIntDec BigIntDecMode + + // DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte + // string into an empty interface value. 
Types to which a []byte is convertible are valid + // for this option, except for array and pointer-to-array types. If nil, the default is + // []byte. + DefaultByteStringType reflect.Type + + // ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string. + ByteStringToString ByteStringToStringMode + + // FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a + // Go struct field name. + FieldNameByteString FieldNameByteStringMode + + // UnrecognizedTagToAny specifies how to decode unrecognized CBOR tag into an empty interface. + // Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. + UnrecognizedTagToAny UnrecognizedTagToAnyMode + + // TimeTagToAny specifies how to decode CBOR tag 0 and 1 into an empty interface (any). + // Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. + TimeTagToAny TimeTagToAnyMode + + // SimpleValues is an immutable mapping from each CBOR simple value to a corresponding + // unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped + // to the Go analog values false, true, nil, and nil, respectively, and all other simple + // values N (except the reserved simple values 24 through 31) are mapped to + // cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded. + // + // Users may provide a custom SimpleValueRegistry constructed via + // NewSimpleValueRegistryFromDefaults. + SimpleValues *SimpleValueRegistry + + // NaN specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing NaN (not-a-number). + NaN NaNMode + + // Inf specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing positive or negative infinity. + Inf InfMode + + // ByteStringToTime specifies how to decode CBOR byte string into Go time.Time. + ByteStringToTime ByteStringToTimeMode + + // ByteStringExpectedFormat specifies how to decode CBOR byte string into Go byte slice + // when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if + // the CBOR byte string does not contain the expected format (e.g. base64) specified. + // For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" + // in RFC 8949 Section 3.4.5.2. + ByteStringExpectedFormat ByteStringExpectedFormatMode + + // BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can + // be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a + // CBOR input, independent of the type of the destination value of a particular Unmarshal + // operation. + BignumTag BignumTagMode + + // BinaryUnmarshaler specifies how to decode into types that implement + // encoding.BinaryUnmarshaler. + BinaryUnmarshaler BinaryUnmarshalerMode +} + +// DecMode returns DecMode with immutable options and no tags (safe for concurrency). +func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam + return opts.decMode() +} + +// validForTags checks that the provided tag set is compatible with these options and returns a +// non-nil error if and only if the provided tag set is incompatible. 
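+//
+// A typical usage pattern (illustrative sketch; the option values are
+// arbitrary and data is a placeholder) is to build a DecMode once and reuse
+// it concurrently:
+//
+//	dm, err := DecOptions{
+//		DupMapKey:       DupMapKeyEnforcedAPF,
+//		IndefLength:     IndefLengthForbidden,
+//		MaxNestedLevels: 16,
+//	}.DecMode()
+//	if err != nil {
+//		// handle invalid options
+//	}
+//	var v interface{}
+//	err = dm.Unmarshal(data, &v)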
+func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return errors.New("cbor: cannot create DecMode with nil value as TagSet") + } + if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone { + for _, tagNum := range []uint64{ + tagNumExpectedLaterEncodingBase64URL, + tagNumExpectedLaterEncodingBase64, + tagNumExpectedLaterEncodingBase16, + } { + if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil { + return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt) + } + } + + } + return nil +} + +// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency). +func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.DecTag != DecTagIgnored { + ts[contentType] = tag + } + } + syncTags.RUnlock() + + if len(ts) > 0 { + dm.tags = ts + } + + return dm, nil +} + +// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency). +func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + dm.tags = tags + return dm, nil +} + +const ( + defaultMaxArrayElements = 131072 + minMaxArrayElements = 16 + maxMaxArrayElements = 2147483647 + + defaultMaxMapPairs = 131072 + minMaxMapPairs = 16 + maxMaxMapPairs = 2147483647 + + defaultMaxNestedLevels = 32 + minMaxNestedLevels = 4 + maxMaxNestedLevels = 65535 +) + +var defaultSimpleValues = func() *SimpleValueRegistry { + registry, err := NewSimpleValueRegistryFromDefaults() + if err != nil { + panic(err) + } + return registry +}() + +//nolint:gocyclo // Each option comes with some manageable boilerplate +func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.DupMapKey.valid() { + return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey))) + } + + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + + if !opts.IntDec.valid() { + return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec))) + } + + if !opts.MapKeyByteString.valid() { + return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString))) + } + + if opts.MaxNestedLevels == 0 { + opts.MaxNestedLevels = defaultMaxNestedLevels + } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels 
> maxMaxNestedLevels { + return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) + + " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])") + } + + if opts.MaxArrayElements == 0 { + opts.MaxArrayElements = defaultMaxArrayElements + } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements { + return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) + + " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])") + } + + if opts.MaxMapPairs == 0 { + opts.MaxMapPairs = defaultMaxMapPairs + } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs { + return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) + + " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])") + } + + if !opts.ExtraReturnErrors.valid() { + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + } + + if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { + return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType) + } + + if !opts.UTF8.valid() { + return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8))) + } + + if !opts.FieldNameMatching.valid() { + return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching))) + } + + if !opts.BigIntDec.valid() { + return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec))) + } + + if opts.DefaultByteStringType != nil && + opts.DefaultByteStringType.Kind() != reflect.String && + (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) { + return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType) + } + + if !opts.ByteStringToString.valid() { + return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString))) + } + + if !opts.FieldNameByteString.valid() { + return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString))) + } + + if !opts.UnrecognizedTagToAny.valid() { + return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny))) + } + simpleValues := opts.SimpleValues + if simpleValues == nil { + simpleValues = defaultSimpleValues + } + + if !opts.TimeTagToAny.valid() { + return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny))) + } + + if !opts.NaN.valid() { + return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN))) + } + + if !opts.Inf.valid() { + return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf))) + } + + if !opts.ByteStringToTime.valid() { + return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime))) + } + + if !opts.ByteStringExpectedFormat.valid() { + return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat))) + } + + if !opts.BignumTag.valid() { + return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag))) + } + + if !opts.BinaryUnmarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler))) + } + + 
dm := decMode{ + dupMapKey: opts.DupMapKey, + timeTag: opts.TimeTag, + maxNestedLevels: opts.MaxNestedLevels, + maxArrayElements: opts.MaxArrayElements, + maxMapPairs: opts.MaxMapPairs, + indefLength: opts.IndefLength, + tagsMd: opts.TagsMd, + intDec: opts.IntDec, + mapKeyByteString: opts.MapKeyByteString, + extraReturnErrors: opts.ExtraReturnErrors, + defaultMapType: opts.DefaultMapType, + utf8: opts.UTF8, + fieldNameMatching: opts.FieldNameMatching, + bigIntDec: opts.BigIntDec, + defaultByteStringType: opts.DefaultByteStringType, + byteStringToString: opts.ByteStringToString, + fieldNameByteString: opts.FieldNameByteString, + unrecognizedTagToAny: opts.UnrecognizedTagToAny, + timeTagToAny: opts.TimeTagToAny, + simpleValues: simpleValues, + nanDec: opts.NaN, + infDec: opts.Inf, + byteStringToTime: opts.ByteStringToTime, + byteStringExpectedFormat: opts.ByteStringExpectedFormat, + bignumTag: opts.BignumTag, + binaryUnmarshaler: opts.BinaryUnmarshaler, + } + + return &dm, nil +} + +// DecMode is the main interface for CBOR decoding. +type DecMode interface { + // Unmarshal parses the CBOR-encoded data into the value pointed to by v + // using the decoding mode. If v is nil, not a pointer, or a nil pointer, + // Unmarshal returns an error. + // + // See the documentation for Unmarshal for details. + Unmarshal(data []byte, v interface{}) error + + // UnmarshalFirst parses the first CBOR data item into the value pointed to by v + // using the decoding mode. Any remaining bytes are returned in rest. + // + // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. + // + // See the documentation for Unmarshal for details. + UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) + + // Valid checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + // + // WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) + // and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". + // + // Deprecated: Valid is kept for compatibility and should not be used. + // Use Wellformed instead because it has a more appropriate name. + Valid(data []byte) error + + // Wellformed checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + Wellformed(data []byte) error + + // NewDecoder returns a new decoder that reads from r using dm DecMode. + NewDecoder(r io.Reader) *Decoder + + // DecOptions returns user specified options used to create this DecMode. 
+ DecOptions() DecOptions +} + +type decMode struct { + tags tagProvider + dupMapKey DupMapKeyMode + timeTag DecTagMode + maxNestedLevels int + maxArrayElements int + maxMapPairs int + indefLength IndefLengthMode + tagsMd TagsMode + intDec IntDecMode + mapKeyByteString MapKeyByteStringMode + extraReturnErrors ExtraDecErrorCond + defaultMapType reflect.Type + utf8 UTF8Mode + fieldNameMatching FieldNameMatchingMode + bigIntDec BigIntDecMode + defaultByteStringType reflect.Type + byteStringToString ByteStringToStringMode + fieldNameByteString FieldNameByteStringMode + unrecognizedTagToAny UnrecognizedTagToAnyMode + timeTagToAny TimeTagToAnyMode + simpleValues *SimpleValueRegistry + nanDec NaNMode + infDec InfMode + byteStringToTime ByteStringToTimeMode + byteStringExpectedFormat ByteStringExpectedFormatMode + bignumTag BignumTagMode + binaryUnmarshaler BinaryUnmarshalerMode +} + +var defaultDecMode, _ = DecOptions{}.decMode() + +// DecOptions returns user specified options used to create this DecMode. +func (dm *decMode) DecOptions() DecOptions { + simpleValues := dm.simpleValues + if simpleValues == defaultSimpleValues { + // Users can't explicitly set this to defaultSimpleValues. It must have been nil in + // the original DecOptions. + simpleValues = nil + } + + return DecOptions{ + DupMapKey: dm.dupMapKey, + TimeTag: dm.timeTag, + MaxNestedLevels: dm.maxNestedLevels, + MaxArrayElements: dm.maxArrayElements, + MaxMapPairs: dm.maxMapPairs, + IndefLength: dm.indefLength, + TagsMd: dm.tagsMd, + IntDec: dm.intDec, + MapKeyByteString: dm.mapKeyByteString, + ExtraReturnErrors: dm.extraReturnErrors, + DefaultMapType: dm.defaultMapType, + UTF8: dm.utf8, + FieldNameMatching: dm.fieldNameMatching, + BigIntDec: dm.bigIntDec, + DefaultByteStringType: dm.defaultByteStringType, + ByteStringToString: dm.byteStringToString, + FieldNameByteString: dm.fieldNameByteString, + UnrecognizedTagToAny: dm.unrecognizedTagToAny, + TimeTagToAny: dm.timeTagToAny, + SimpleValues: simpleValues, + NaN: dm.nanDec, + Inf: dm.infDec, + ByteStringToTime: dm.byteStringToTime, + ByteStringExpectedFormat: dm.byteStringExpectedFormat, + BignumTag: dm.bignumTag, + BinaryUnmarshaler: dm.binaryUnmarshaler, + } +} + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using dm decoding mode. If v is nil, not a pointer, or a nil pointer, +// Unmarshal returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) Unmarshal(data []byte, v interface{}) error { + d := decoder{data: data, dm: dm} + + // Check well-formedness. + off := d.off // Save offset before data validation + err := d.wellformed(false, false) // don't allow any extra data after valid data item. + d.off = off // Restore offset + if err != nil { + return err + } + + return d.value(v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using dm decoding mode. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + d := decoder{data: data, dm: dm} + + // check well-formedness. + off := d.off // Save offset before data validation + err = d.wellformed(true, false) // allow extra data after well-formed data item + d.off = off // Restore offset + + // If it is well-formed, parse the value. 
This is structured like this to allow + // better test coverage + if err == nil { + err = d.value(v) + } + + // If either wellformed or value returned an error, do not return rest bytes + if err != nil { + return nil, err + } + + // Return the rest of the data slice (which might be len 0) + return d.data[d.off:], nil +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func (dm *decMode) Valid(data []byte) error { + return dm.Wellformed(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func (dm *decMode) Wellformed(data []byte) error { + d := decoder{data: data, dm: dm} + return d.wellformed(false, false) +} + +// NewDecoder returns a new decoder that reads from r using dm DecMode. +func (dm *decMode) NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r, d: decoder{dm: dm}} +} + +type decoder struct { + data []byte + off int // next read offset in data + dm *decMode + + // expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags, + // if any. + // + // The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding + // byte strings, the effective encoding comes from the tag nearest to the byte string being + // decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be + // controlled by tag 22,and in the data item 23(h'42', 22([21(h'43')])]) the effective + // encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21, + // respectively. + expectedLaterEncodingTags []uint64 +} + +// value decodes CBOR data item into the value pointed to by v. +// If CBOR data item fails to be decoded into v, +// error is returned and offset is moved to the next CBOR data item. +// Precondition: d.data contains at least one well-formed CBOR data item. +func (d *decoder) value(v interface{}) error { + // v can't be nil, non-pointer, or nil pointer value. + if v == nil { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"} + } + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"} + } else if rv.IsNil() { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"} + } + rv = rv.Elem() + return d.parseToValue(rv, getTypeInfo(rv.Type())) +} + +// parseToValue decodes CBOR data to value. It assumes data is well-formed, +// and does not perform bounds checking. +func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + + // Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil. 
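+	// For example, unmarshaling 0xf6 (null) into a *int destination sets the
+	// pointer to nil instead of allocating a zero int.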
+ if d.nextCBORNil() && v.Kind() == reflect.Ptr { + d.skip() + v.Set(reflect.Zero(v.Type())) + return nil + } + + if tInfo.spclType == specialTypeIface { + if !v.IsNil() { + // Use value type + v = v.Elem() + tInfo = getTypeInfo(v.Type()) + } else { //nolint:gocritic + // Create and use registered type if CBOR data is registered tag + if d.dm.tags != nil && d.nextCBORType() == cborTypeTag { + + off := d.off + var tagNums []uint64 + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + tagNums = append(tagNums, tagNum) + } + d.off = off + + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + if registeredType.Implements(tInfo.nonPtrType) || + reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) { + v.Set(reflect.New(registeredType)) + v = v.Elem() + tInfo = getTypeInfo(registeredType) + } + } + } + } + } + + // Create new value for the pointer v to point to. + // At this point, CBOR value is not nil/undefined if v is a pointer. + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + d.skip() + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + + // Strip self-described CBOR tag number. + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return err + } + } + d.off = off + + if tInfo.spclType != specialTypeNone { + switch tInfo.spclType { + case specialTypeEmptyIface: + iv, err := d.parse(false) // Skipped self-described CBOR tag number already. + if iv != nil { + v.Set(reflect.ValueOf(iv)) + } + return err + + case specialTypeTag: + return d.parseToTag(v) + + case specialTypeTime: + if d.nextCBORNil() { + // Decoding CBOR null and undefined to time.Time is no-op. + d.skip() + return nil + } + tm, ok, err := d.parseToTime() + if err != nil { + return err + } + if ok { + v.Set(reflect.ValueOf(tm)) + } + return nil + + case specialTypeUnmarshalerIface: + return d.parseToUnmarshaler(v) + } + } + + // Check registered tag number + if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil { + t := d.nextCBORType() + if t != cborTypeTag { + if tagItem.opts.DecTag == DecTagRequired { + d.skip() // Required tag number is absent, skip entire tag + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.typ.String(), + errorMsg: "expect CBOR tag value"} + } + } else if err := d.validRegisteredTagNums(tagItem); err != nil { + d.skip() // Skip tag content + return err + } + } + + t := d.nextCBORType() + + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + return fillPositiveInt(t, val, v) + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
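+			// A CBOR negative integer with argument val represents -(1 + val),
+			// so reconstruct that value exactly in a big.Int below.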
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + nValue := int64(-1) ^ int64(val) + return fillNegativeInt(t, nValue, v) + + case cborTypeByteString: + b, copied := d.parseByteString() + b, converted, err := d.applyByteStringTextConversion(b, v.Type()) + if err != nil { + return err + } + copied = copied || converted + return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler) + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return err + } + return fillTextString(t, b, v) + + case cborTypePrimitives: + _, ai, val := d.getHead() + switch ai { + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return fillFloat(t, f, v) + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return fillFloat(t, f, v) + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return fillFloat(t, f, v) + + default: // ai <= 24 + if d.dm.simpleValues.rejected[SimpleValue(val)] { + return &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return fillBool(t, ai == additionalInformationAsTrue, v) + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return fillNil(t, v) + + default: + return fillPositiveInt(t, val, v) + } + } + + case cborTypeTag: + _, _, tagNum := d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + // Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsUint64() { + return fillPositiveInt(t, bi.Uint64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumNegativeBignum: + // Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsInt64() { + return fillNegativeInt(t, bi.Int64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. 
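+			// Otherwise, record the tag number on expectedLaterEncodingTags for
+			// the duration of decoding its content, so nested byte strings use
+			// the nearest enclosing expected-encoding tag.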
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + } + } + + return d.parseToValue(v, tInfo) + + case cborTypeArray: + if tInfo.nonPtrKind == reflect.Slice { + return d.parseArrayToSlice(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Array { + return d.parseArrayToArray(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Struct { + return d.parseArrayToStruct(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + + case cborTypeMap: + if tInfo.nonPtrKind == reflect.Struct { + return d.parseMapToStruct(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Map { + return d.parseMapToMap(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + } + + return nil +} + +func (d *decoder) parseToTag(v reflect.Value) error { + if d.nextCBORNil() { + // Decoding CBOR null and undefined to cbor.Tag is no-op. + d.skip() + return nil + } + + t := d.nextCBORType() + if t != cborTypeTag { + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()} + } + + // Unmarshal tag number + _, _, num := d.getHead() + + // Unmarshal tag content + content, err := d.parse(false) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(Tag{num, content})) + return nil +} + +// parseToTime decodes the current data item as a time.Time. The bool return value is false if and +// only if the destination value should remain unmodified. +func (d *decoder) parseToTime() (time.Time, bool, error) { + // Verify that tag number or absence of tag number is acceptable to specified timeTag. 
+ if t := d.nextCBORType(); t == cborTypeTag { + if d.dm.timeTag == DecTagIgnored { + // Skip all enclosing tags + for t == cborTypeTag { + d.getHead() + t = d.nextCBORType() + } + if d.nextCBORNil() { + d.skip() + return time.Time{}, false, nil + } + } else { + // Read tag number + _, _, tagNum := d.getHead() + if tagNum != 0 && tagNum != 1 { + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + } + } + } else { + if d.dm.timeTag == DecTagRequired { + d.skip() + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"} + } + } + + switch t := d.nextCBORType(); t { + case cborTypeByteString: + if d.dm.byteStringToTime == ByteStringToTimeAllowed { + b, _ := d.parseByteString() + t, err := time.Parse(time.RFC3339, string(b)) + if err != nil { + return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err) + } + return t, true, nil + } + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + + case cborTypeTextString: + s, err := d.parseTextString() + if err != nil { + return time.Time{}, false, err + } + t, err := time.Parse(time.RFC3339, string(s)) + if err != nil { + return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error()) + } + return t, true, nil + + case cborTypePositiveInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%d overflows Go's int64", val), + } + } + return time.Unix(int64(val), 0), true, nil + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + if val == math.MaxUint64 { + // Maximum absolute value representable by negative integer is 2^64, + // not 2^64-1, so it overflows uint64. + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: "-18446744073709551616 overflows Go's int64", + } + } + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1), + } + } + return time.Unix(int64(-1)^int64(val), 0), true, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + var f float64 + switch ai { + case additionalInformationAsFloat16: + f = float64(float16.Frombits(uint16(val)).Float32()) + + case additionalInformationAsFloat32: + f = float64(math.Float32frombits(uint32(val))) + + case additionalInformationAsFloat64: + f = math.Float64frombits(val) + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } + + if math.IsNaN(f) || math.IsInf(f, 0) { + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6 + return time.Time{}, true, nil + } + seconds, fractional := math.Modf(f) + return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } +} + +// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface. +// It assumes data is well-formed, and does not perform bounds checking. 
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error { + if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() { + d.skip() + return nil + } + + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + if u, ok := v.Interface().(Unmarshaler); ok { + start := d.off + d.skip() + return u.UnmarshalCBOR(d.data[start:d.off]) + } + d.skip() + return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler") +} + +// parse parses CBOR data and returns value in default Go type. +// It assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo + // Strip self-described CBOR tag number. + if skipSelfDescribedTag { + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return nil, err + } + } + d.off = off + + t := d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + + switch d.dm.intDec { + case IntDecConvertNone: + return val, nil + + case IntDecConvertSigned, IntDecConvertSignedOrFail: + if val > math.MaxInt64 { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + + return int64(val), nil + + case IntDecConvertSignedOrBigInt: + if val > math.MaxInt64 { + bi := new(big.Int).SetUint64(val) + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + return int64(val), nil + + default: + // not reachable + } + + case cborTypeNegativeInt: + _, _, val := d.getHead() + + if val > math.MaxInt64 { + // CBOR negative integer value overflows Go int64, use big.Int instead. + bi := new(big.Int).SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.intDec == IntDecConvertSignedOrFail { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + nValue := int64(-1) ^ int64(val) + return nValue, nil + + case cborTypeByteString: + b, copied := d.parseByteString() + var effectiveByteStringType = d.dm.defaultByteStringType + if effectiveByteStringType == nil { + effectiveByteStringType = typeByteSlice + } + b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType) + if err != nil { + return nil, err + } + copied = copied || converted + + switch effectiveByteStringType { + case typeByteSlice: + if copied { + return b, nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return clone, nil + + case typeString: + return string(b), nil + + default: + if copied || d.dm.defaultByteStringType.Kind() == reflect.String { + // Avoid an unnecessary copy since the conversion to string must + // copy the underlying bytes. 
+ return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil + } + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return nil, err + } + return string(b), nil + + case cborTypeTag: + tagOff := d.off + _, _, tagNum := d.getHead() + contentOff := d.off + + switch tagNum { + case tagNumRFC3339Time, tagNumEpochTime: + d.off = tagOff + tm, _, err := d.parseToTime() + if err != nil { + return nil, err + } + + switch d.dm.timeTagToAny { + case TimeTagToTime: + return tm, nil + + case TimeTagToRFC3339: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format. E.g. year cannot exceed 9999, etc. + text, err := tm.Truncate(time.Second).MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err) + } + return string(text), nil + + case TimeTagToRFC3339Nano: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format with sub-second precision. + text, err := tm.MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err) + } + return string(text), nil + + default: + // not reachable + } + + case tagNumUnsignedBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumNegativeBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. + if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + return d.parse(false) + } + } + + if d.dm.tags != nil { + // Parse to specified type if tag number is registered. 
+ tagNums := []uint64{tagNum} + for d.nextCBORType() == cborTypeTag { + _, _, num := d.getHead() + tagNums = append(tagNums, num) + } + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + d.off = tagOff + rv := reflect.New(registeredType) + if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil { + return nil, err + } + return rv.Elem().Interface(), nil + } + } + + // Parse tag content + d.off = contentOff + content, err := d.parse(false) + if err != nil { + return nil, err + } + if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny { + return content, nil + } + return Tag{tagNum, content}, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + return nil, &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + if ai < 20 || ai == 24 { + return SimpleValue(val), nil + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return (ai == additionalInformationAsTrue), nil + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return nil, nil + + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return f, nil + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return f, nil + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return f, nil + } + + case cborTypeArray: + return d.parseArray() + + case cborTypeMap: + if d.dm.defaultMapType != nil { + m := reflect.New(d.dm.defaultMapType) + err := d.parseToValue(m, getTypeInfo(m.Elem().Type())) + if err != nil { + return nil, err + } + return m.Elem().Interface(), nil + } + return d.parseMap() + } + + return nil, nil +} + +// parseByteString parses a CBOR encoded byte string. The returned byte slice +// may be backed directly by the input. The second return value will be true if +// and only if the slice is backed by a copy of the input. Callers are +// responsible for making a copy if necessary. +func (d *decoder) parseByteString() ([]byte, bool) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + return b, false + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + b = append(b, d.data[d.off:d.off+int(val)]...) + d.off += int(val) + } + return b, true +} + +// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text +// encoding. If no transformation was performed (because it was not required), the original byte +// slice is returned and the bool return value is false. Otherwise, a new slice containing the +// converted bytes is returned along with the bool value true. 
+func (d *decoder) applyByteStringTextConversion( + src []byte, + dstType reflect.Type, +) ( + dst []byte, + transformed bool, + err error, +) { + switch dstType.Kind() { + case reflect.String: + if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 { + return src, false, nil + } + + switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] { + case tagNumExpectedLaterEncodingBase64URL: + encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src))) + base64.RawURLEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase64: + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase16: + encoded := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(encoded, src) + return encoded, true, nil + + default: + // If this happens, there is a bug: the decoder has pushed an invalid + // "expected later encoding" tag to the stack. + panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags)) + } + + case reflect.Slice: + if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 { + // Either the destination is not a slice of bytes, or the encoder that + // produced the input indicated an expected text encoding tag and therefore + // the content of the byte string has NOT been text encoded. + return src, false, nil + } + + switch d.dm.byteStringExpectedFormat { + case ByteStringExpectedBase64URL: + decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + n, err := base64.RawURLEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase64: + decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src))) + n, err := base64.StdEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase16: + decoded := make([]byte, hex.DecodedLen(len(src))) + n, err := hex.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err) + } + return decoded[:n], true, nil + } + } + + return src, false, nil +} + +// parseTextString parses CBOR encoded text string. It returns a byte slice +// to prevent creating an extra copy of string. Caller should wrap returned +// byte slice as string when needed. +func (d *decoder) parseTextString() ([]byte, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + return b, nil + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + x := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { + for !d.foundBreak() { + d.skip() // Skip remaining chunk on error + } + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + b = append(b, x...) 
+ } + return b, nil +} + +func (d *decoder) parseArray() ([]interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + v := make([]interface{}, count) + var e interface{} + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + v[i] = e + } + return v, err +} + +func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + if v.IsNil() || v.Cap() < count || count == 0 { + v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count)) + } + v.SetLen(count) + var err error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + } + return err +} + +func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + gi := 0 + vLen := v.Len() + var err error + for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ { + if gi < vLen { + // Read CBOR array element and set array element + if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + gi++ + } else { + d.skip() // Skip remaining CBOR array element + } + } + // Set remaining Go array elements to zero values. + if gi < vLen { + zeroV := reflect.Zero(tInfo.elemTypeInfo.typ) + for ; gi < vLen; gi++ { + v.Index(gi).Set(zeroV) + } + } + return err +} + +func (d *decoder) parseMap() (interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + m := make(map[interface{}]interface{}) + var k, e interface{} + var err, lastErr error + keyCount := 0 + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if k, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + rv := reflect.ValueOf(k) + if !isHashableValue(rv) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + k, converted = convertByteSliceToByteString(k) + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{rv.Type().String()} + } + d.skip() + continue + } + } + + // Parse CBOR map value. + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + m[k] = e + + // Detect duplicate map key. 
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := len(m) + if newKeyCount == keyCount { + m[k] = nil + err = &DupMapKeyError{k, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // Skip map key + d.skip() // Skip map value + } + return m, err + } + keyCount = newKeyCount + } + } + return m, err +} + +func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if v.IsNil() { + mapsize := count + if !hasSize { + mapsize = 0 + } + v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize)) + } + keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ + reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind) + var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value + keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable. + var err, lastErr error + keyCount := v.Len() + var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + existingKeys = make(map[interface{}]bool, keyCount) + if keyCount > 0 { + vKeys := v.MapKeys() + for i := 0; i < len(vKeys); i++ { + existingKeys[vKeys[i].Interface()] = true + } + } + } + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if !keyValue.IsValid() { + keyValue = reflect.New(keyType).Elem() + } else if !reuseKey { + if !zeroKeyValue.IsValid() { + zeroKeyValue = reflect.Zero(keyType) + } + keyValue.Set(zeroKeyValue) + } + if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + if keyIsInterfaceType && keyValue.Elem().IsValid() { + if !isHashableValue(keyValue.Elem()) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + var k interface{} + k, converted = convertByteSliceToByteString(keyValue.Elem().Interface()) + if converted { + keyValue.Set(reflect.ValueOf(k)) + } + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()} + } + d.skip() + continue + } + } + } + + // Parse CBOR map value. + if !eleValue.IsValid() { + eleValue = reflect.New(eleType).Elem() + } else if !reuseEle { + if !zeroEleValue.IsValid() { + zeroEleValue = reflect.Zero(eleType) + } + eleValue.Set(zeroEleValue) + } + if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + v.SetMapIndex(keyValue, eleValue) + + // Detect duplicate map key. 
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := v.Len() + if newKeyCount == keyCount { + kvi := keyValue.Interface() + if !existingKeys[kvi] { + v.SetMapIndex(keyValue, reflect.New(eleType).Elem()) + err = &DupMapKeyError{kvi, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // skip map key + d.skip() // skip map value + } + return err + } + delete(existingKeys, kvi) + } + keyCount = newKeyCount + } + } + return err +} + +func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if !structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR array to struct without toarray option", + } + } + + start := d.off + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size + } + if count != len(structType.fields) { + d.off = start + d.skip() + return &UnmarshalTypeError{ + CBORType: cborTypeArray.String(), + GoType: tInfo.typ.String(), + errorMsg: "cannot decode CBOR array to struct with different number of elements", + } + } + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + f := structType.fields[i] + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.typ.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR map to struct with toarray option", + } + } + + var err, lastErr error + + // Get CBOR map size + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + + // Keeps track of matched struct fields + var foundFldIdx []bool + { + const maxStackFields = 128 + if nfields := len(structType.fields); nfields <= maxStackFields { + // For structs with typical field counts, expect that this can be + // stack-allocated. 
+ var a [maxStackFields]bool + foundFldIdx = a[:nfields] + } else { + foundFldIdx = make([]bool, len(structType.fields)) + } + } + + // Keeps track of CBOR map keys to detect duplicate map key + keyCount := 0 + var mapKeys map[interface{}]struct{} + + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + +MapEntryLoop: + for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + var f *field + + // If duplicate field detection is enabled and the key at index j did not match any + // field, k will hold the map key. + var k interface{} + + t := d.nextCBORType() + if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + var keyBytes []byte + if t == cborTypeTextString { + keyBytes, lastErr = d.parseTextString() + if lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() // skip value + continue + } + } else { // cborTypeByteString + keyBytes, _ = d.parseByteString() + } + + // Check for exact match on field name. + if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + fld := structType.fields[i] + + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{fld.name, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + } + + // Find field with case-insensitive match + if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { + keyLen := len(keyBytes) + keyString := string(keyBytes) + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{keyString, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = string(keyBytes) + } + } else if t <= cborTypeNegativeInt { // uint/int + var nameAsInt int64 + + if t == cborTypePositiveInt { + _, _, val := d.getHead() + nameAsInt = int64(val) + } else { + _, _, val := d.getHead() + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(-1) ^ int64(val) + } + + // Find field + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if fld.keyAsInt && fld.nameAsInt == nameAsInt { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{nameAsInt, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = nameAsInt + } + } else { + if err == nil { + 
err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf("").String(), + errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", + } + } + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + // parse key + k, lastErr = d.parse(true) + if lastErr != nil { + d.skip() // skip value + continue + } + // Detect if CBOR map key can be used as Go map key. + if !isHashableValue(reflect.ValueOf(k)) { + d.skip() // skip value + continue + } + } else { + d.skip() // skip key + } + } + + if f == nil { + if errOnUnknownField { + err = &UnknownFieldError{j} + d.skip() // Skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + + // Two map keys that match the same struct field are immediately considered + // duplicates. This check detects duplicates between two map keys that do + // not match a struct field. If unknown field errors are enabled, then this + // check is never reached. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if mapKeys == nil { + mapKeys = make(map[interface{}]struct{}, 1) + } + mapKeys[k] = struct{}{} + newKeyCount := len(mapKeys) + if newKeyCount == keyCount { + err = &DupMapKeyError{k, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + keyCount = newKeyCount + } + + d.skip() // Skip value + continue + } + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// validRegisteredTagNums verifies that tag numbers match registered tag numbers of type t. +// validRegisteredTagNums assumes next CBOR data type is tag. It scans all tag numbers, and stops at tag content. +func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error { + // Scan until next cbor data is tag content. + tagNums := make([]uint64, 0, 1) + for d.nextCBORType() == cborTypeTag { + _, _, val := d.getHead() + tagNums = append(tagNums, val) + } + + if !registeredTag.equalTagNum(tagNums) { + return &WrongTagError{registeredTag.contentType, registeredTag.num, tagNums} + } + return nil +} + +func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem { + if d.dm.tags != nil { + return d.dm.tags.getTagItemFromType(vt) + } + return nil +} + +// skip moves data offset to the next item. skip assumes data is well-formed, +// and does not perform bounds checking. 
+func (d *decoder) skip() { + t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + + if indefiniteLength { + switch t { + case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap: + for { + if isBreakFlag(d.data[d.off]) { + d.off++ + return + } + d.skip() + } + } + } + + switch t { + case cborTypeByteString, cborTypeTextString: + d.off += int(val) + + case cborTypeArray: + for i := 0; i < int(val); i++ { + d.skip() + } + + case cborTypeMap: + for i := 0; i < int(val)*2; i++ { + d.skip() + } + + case cborTypeTag: + d.skip() + } +} + +func (d *decoder) getHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, +) { + t, ai, val = d.getHead() + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +// getHead assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) getHead() (t cborType, ai byte, val uint64) { + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + + if ai <= maxAdditionalInformationWithoutArgument { + return + } + + if ai == additionalInformationWith1ByteArgument { + val = uint64(d.data[d.off]) + d.off++ + return + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + return + } + return +} + +func (d *decoder) numOfItemsUntilBreak() int { + savedOff := d.off + i := 0 + for !d.foundBreak() { + d.skip() + i++ + } + d.off = savedOff + return i +} + +// foundBreak returns true if next byte is CBOR break code and moves cursor by 1, +// otherwise it returns false. +// foundBreak assumes data is well-formed, and does not perform bounds checking. 
+func (d *decoder) foundBreak() bool { + if isBreakFlag(d.data[d.off]) { + d.off++ + return true + } + return false +} + +func (d *decoder) reset(data []byte) { + d.data = data + d.off = 0 + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0] +} + +func (d *decoder) nextCBORType() cborType { + return getType(d.data[d.off]) +} + +func (d *decoder) nextCBORNil() bool { + return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7 +} + +var ( + typeIntf = reflect.TypeOf([]interface{}(nil)).Elem() + typeTime = reflect.TypeOf(time.Time{}) + typeBigInt = reflect.TypeOf(big.Int{}) + typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + typeString = reflect.TypeOf("") + typeByteSlice = reflect.TypeOf([]byte(nil)) +) + +func fillNil(_ cborType, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr: + v.Set(reflect.Zero(v.Type())) + return nil + } + return nil +} + +func fillPositiveInt(t cborType, val uint64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if val > math.MaxInt64 { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + if v.OverflowInt(int64(val)) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(int64(val)) + return nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if v.OverflowUint(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetUint(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + + if v.Type() == typeBigInt { + i := new(big.Int).SetUint64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillNegativeInt(t cborType, val int64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.OverflowInt(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + if v.Type() == typeBigInt { + i := new(big.Int).SetInt64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillBool(t cborType, val bool, v reflect.Value) error { + if v.Kind() == reflect.Bool { + v.SetBool(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillFloat(t cborType, val float64, v reflect.Value) error { + switch v.Kind() { + case reflect.Float32, reflect.Float64: + if v.OverflowFloat(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(), + } + } + v.SetFloat(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: 
v.Type().String()} +} + +func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error { + if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) { + if v.CanAddr() { + v = v.Addr() + if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok { + // The contract of BinaryUnmarshaler forbids + // retaining the input bytes, so no copying is + // required even if val is shared. + return u.UnmarshalBinary(val) + } + } + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 { + src := val + if shared { + // SetBytes shares the underlying bytes of the source slice. + src = make([]byte, len(val)) + copy(src, val) + } + v.SetBytes(src) + return nil + } + if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 { + vLen := v.Len() + i := 0 + for ; i < vLen && i < len(val); i++ { + v.Index(i).SetUint(uint64(val[i])) + } + // Set remaining Go array elements to zero values. + if i < vLen { + zeroV := reflect.Zero(reflect.TypeOf(byte(0))) + for ; i < vLen; i++ { + v.Index(i).Set(zeroV) + } + } + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillTextString(t cborType, val []byte, v reflect.Value) error { + if v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func isImmutableKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.String: + return true + + default: + return false + } +} + +func isHashableValue(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Slice, reflect.Map, reflect.Func: + return false + + case reflect.Struct: + switch rv.Type() { + case typeTag: + tag := rv.Interface().(Tag) + return isHashableValue(reflect.ValueOf(tag.Content)) + case typeBigInt: + return false + } + } + return true +} + +// convertByteSliceToByteString converts []byte to ByteString if +// - v is []byte type, or +// - v is Tag type and tag content type is []byte +// This function also handles nested tags. +// CBOR data is already verified to be well-formed before this function is used, +// so the recursion won't exceed max nested levels. +func convertByteSliceToByteString(v interface{}) (interface{}, bool) { + switch v := v.(type) { + case []byte: + return ByteString(v), true + + case Tag: + content, converted := convertByteSliceToByteString(v.Content) + if converted { + return Tag{Number: v.Number, Content: content}, true + } + } + return v, false +} diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go new file mode 100644 index 0000000000..44afb86608 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -0,0 +1,724 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "strconv" + "unicode/utf16" + "unicode/utf8" + + "github.com/x448/float16" +) + +// DiagMode is the main interface for CBOR diagnostic notation. +type DiagMode interface { + // Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode. + Diagnose([]byte) (string, error) + + // DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. + DiagnoseFirst([]byte) (string, []byte, error) + + // DiagOptions returns user specified options used to create this DiagMode. + DiagOptions() DiagOptions +} + +// ByteStringEncoding specifies the base encoding that byte strings are notated. +type ByteStringEncoding uint8 + +const ( + // ByteStringBase16Encoding encodes byte strings in base16, without padding. + ByteStringBase16Encoding ByteStringEncoding = iota + + // ByteStringBase32Encoding encodes byte strings in base32, without padding. + ByteStringBase32Encoding + + // ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding. + ByteStringBase32HexEncoding + + // ByteStringBase64Encoding encodes byte strings in base64url, without padding. + ByteStringBase64Encoding + + maxByteStringEncoding +) + +func (bse ByteStringEncoding) valid() error { + if bse >= maxByteStringEncoding { + return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse))) + } + return nil +} + +// DiagOptions specifies Diag options. +type DiagOptions struct { + // ByteStringEncoding specifies the base encoding that byte strings are notated. + // Default is ByteStringBase16Encoding. + ByteStringEncoding ByteStringEncoding + + // ByteStringHexWhitespace specifies notating with whitespace in byte string + // when ByteStringEncoding is ByteStringBase16Encoding. + ByteStringHexWhitespace bool + + // ByteStringText specifies notating with text in byte string + // if it is a valid UTF-8 text. + ByteStringText bool + + // ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string + // if it is a valid CBOR bytes. + ByteStringEmbeddedCBOR bool + + // CBORSequence specifies notating CBOR sequences. + // otherwise, it returns an error if there are more bytes after the first CBOR. + CBORSequence bool + + // FloatPrecisionIndicator specifies appending a suffix to indicate float precision. + // Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators. + FloatPrecisionIndicator bool + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int +} + +// DiagMode returns a DiagMode with immutable options. 
+func (opts DiagOptions) DiagMode() (DiagMode, error) { + return opts.diagMode() +} + +func (opts DiagOptions) diagMode() (*diagMode, error) { + if err := opts.ByteStringEncoding.valid(); err != nil { + return nil, err + } + + decMode, err := DecOptions{ + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.decMode() + if err != nil { + return nil, err + } + + return &diagMode{ + byteStringEncoding: opts.ByteStringEncoding, + byteStringHexWhitespace: opts.ByteStringHexWhitespace, + byteStringText: opts.ByteStringText, + byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR, + cborSequence: opts.CBORSequence, + floatPrecisionIndicator: opts.FloatPrecisionIndicator, + decMode: decMode, + }, nil +} + +type diagMode struct { + byteStringEncoding ByteStringEncoding + byteStringHexWhitespace bool + byteStringText bool + byteStringEmbeddedCBOR bool + cborSequence bool + floatPrecisionIndicator bool + decMode *decMode +} + +// DiagOptions returns user specified options used to create this DiagMode. +func (dm *diagMode) DiagOptions() DiagOptions { + return DiagOptions{ + ByteStringEncoding: dm.byteStringEncoding, + ByteStringHexWhitespace: dm.byteStringHexWhitespace, + ByteStringText: dm.byteStringText, + ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR, + CBORSequence: dm.cborSequence, + FloatPrecisionIndicator: dm.floatPrecisionIndicator, + MaxNestedLevels: dm.decMode.maxNestedLevels, + MaxArrayElements: dm.decMode.maxArrayElements, + MaxMapPairs: dm.decMode.maxMapPairs, + } +} + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode. +func (dm *diagMode) Diagnose(data []byte) (string, error) { + return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence) +} + +// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. +func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return newDiagnose(data, dm.decMode, dm).diagFirst() +} + +var defaultDiagMode, _ = DiagOptions{}.diagMode() + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items +// using the default diagnostic mode. +// +// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation. +func Diagnose(data []byte) (string, error) { + return defaultDiagMode.Diagnose(data) +} + +// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. 
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return defaultDiagMode.DiagnoseFirst(data) +} + +type diagnose struct { + dm *diagMode + d *decoder + w *bytes.Buffer +} + +func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose { + return &diagnose{ + dm: diagm, + d: &decoder{data: data, dm: decm}, + w: &bytes.Buffer{}, + } +} + +func (di *diagnose) diag(cborSequence bool) (string, error) { + // CBOR Sequence + firstItem := true + for { + switch err := di.wellformed(cborSequence); err { + case nil: + if !firstItem { + di.w.WriteString(", ") + } + firstItem = false + if itemErr := di.item(); itemErr != nil { + return di.w.String(), itemErr + } + + case io.EOF: + if firstItem { + return di.w.String(), err + } + return di.w.String(), nil + + default: + return di.w.String(), err + } + } +} + +func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) { + err = di.wellformed(true) + if err == nil { + err = di.item() + } + + if err == nil { + // Return EDN and the rest of the data slice (which might be len 0) + return di.w.String(), di.d.data[di.d.off:], nil + } + + return di.w.String(), nil, err +} + +func (di *diagnose) wellformed(allowExtraData bool) error { + off := di.d.off + err := di.d.wellformed(allowExtraData, false) + di.d.off = off + return err +} + +func (di *diagnose) item() error { //nolint:gocyclo + initialByte := di.d.data[di.d.off] + switch initialByte { + case cborByteStringWithIndefiniteLengthHead, + cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string + di.d.off++ + if isBreakFlag(di.d.data[di.d.off]) { + di.d.off++ + switch initialByte { + case cborByteStringWithIndefiniteLengthHead: + // indefinite-length bytes with no chunks. + di.w.WriteString(`''_`) + return nil + case cborTextStringWithIndefiniteLengthHead: + // indefinite-length text with no chunks. + di.w.WriteString(`""_`) + return nil + } + } + + di.w.WriteString("(_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // wellformedIndefiniteString() already checked that the next item is a byte/text string. + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(')') + return nil + + case cborArrayWithIndefiniteLengthHead: // indefinite-length array + di.d.off++ + di.w.WriteString("[_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(']') + return nil + + case cborMapWithIndefiniteLengthHead: // indefinite-length map + di.d.off++ + di.w.WriteString("{_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // key + if err := di.item(); err != nil { + return err + } + + di.w.WriteString(": ") + + // value + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte('}') + return nil + } + + t := di.d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := di.d.getHead() + di.w.WriteString(strconv.FormatUint(val, 10)) + return nil + + case cborTypeNegativeInt: + _, _, val := di.d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + } + + nValue := int64(-1) ^ int64(val) + di.w.WriteString(strconv.FormatInt(nValue, 10)) + return nil + + case cborTypeByteString: + b, _ := di.d.parseByteString() + return di.encodeByteString(b) + + case cborTypeTextString: + b, err := di.d.parseTextString() + if err != nil { + return err + } + return di.encodeTextString(string(b), '"') + + case cborTypeArray: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('[') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte(']') + return nil + + case cborTypeMap: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('{') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + // key + if err := di.item(); err != nil { + return err + } + di.w.WriteString(": ") + // value + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte('}') + return nil + + case cborTypeTag: + _, _, tagNum := di.d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumUnsignedBignum, + "byte string", + nt.String()) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + di.w.WriteString(bi.String()) + return nil + + case tagNumNegativeBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumNegativeBignum, + "byte string", + nt.String(), + ) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + + default: + di.w.WriteString(strconv.FormatUint(tagNum, 10)) + di.w.WriteByte('(') + if err := di.item(); err != nil { + return err + } + di.w.WriteByte(')') + return nil + } + + case cborTypePrimitives: + _, ai, val := di.d.getHead() + switch ai { + case additionalInformationAsFalse: + di.w.WriteString("false") + return nil + + case additionalInformationAsTrue: + di.w.WriteString("true") + return nil + + case additionalInformationAsNull: + di.w.WriteString("null") + return nil + + case additionalInformationAsUndefined: + di.w.WriteString("undefined") + return nil + + case additionalInformationAsFloat16, + additionalInformationAsFloat32, + additionalInformationAsFloat64: + return di.encodeFloat(ai, val) + + default: + di.w.WriteString("simple(") + di.w.WriteString(strconv.FormatUint(val, 10)) + di.w.WriteByte(')') + return nil + } + } + + return nil +} + +// writeU16 format a rune as "\uxxxx" +func (di *diagnose) writeU16(val rune) { + di.w.WriteString("\\u") + var in [2]byte + in[0] = byte(val >> 8) + in[1] = byte(val) + sz := hex.EncodedLen(len(in)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, in[:]) + di.w.Write(dst) +} + +var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding) +var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) + +func (di *diagnose) encodeByteString(val []byte) error { + if len(val) > 0 { + if di.dm.byteStringText && utf8.Valid(val) { + return di.encodeTextString(string(val), '\'') + } + + if di.dm.byteStringEmbeddedCBOR { + di2 := newDiagnose(val, di.dm.decMode, di.dm) + // should always notating embedded CBOR sequence. 
+ if str, err := di2.diag(true); err == nil { + di.w.WriteString("<<") + di.w.WriteString(str) + di.w.WriteString(">>") + return nil + } + } + } + + switch di.dm.byteStringEncoding { + case ByteStringBase16Encoding: + di.w.WriteString("h'") + if di.dm.byteStringHexWhitespace { + sz := hex.EncodedLen(len(val)) + if len(val) > 0 { + sz += len(val) - 1 + } + di.w.Grow(sz) + + dst := di.w.Bytes()[di.w.Len():] + for i := range val { + if i > 0 { + dst = append(dst, ' ') + } + hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1]) + dst = dst[:len(dst)+2] + } + di.w.Write(dst) + } else { + sz := hex.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, val) + di.w.Write(dst) + } + di.w.WriteByte('\'') + return nil + + case ByteStringBase32Encoding: + di.w.WriteString("b32'") + sz := rawBase32Encoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32Encoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase32HexEncoding: + di.w.WriteString("h32'") + sz := rawBase32HexEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32HexEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase64Encoding: + di.w.WriteString("b64'") + sz := base64.RawURLEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + base64.RawURLEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + default: + // It should not be possible for users to construct a *diagMode with an invalid byte + // string encoding. + panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding)) + } +} + +const utf16SurrSelf = rune(0x10000) + +// quote should be either `'` or `"` +func (di *diagnose) encodeTextString(val string, quote byte) error { + di.w.WriteByte(quote) + + for i := 0; i < len(val); { + if b := val[i]; b < utf8.RuneSelf { + switch { + case b == '\t', b == '\n', b == '\r', b == '\\', b == quote: + di.w.WriteByte('\\') + + switch b { + case '\t': + b = 't' + case '\n': + b = 'n' + case '\r': + b = 'r' + } + di.w.WriteByte(b) + + case b >= ' ' && b <= '~': + di.w.WriteByte(b) + + default: + di.writeU16(rune(b)) + } + + i++ + continue + } + + c, size := utf8.DecodeRuneInString(val[i:]) + switch { + case c == utf8.RuneError: + return &SemanticError{"cbor: invalid UTF-8 string"} + + case c < utf16SurrSelf: + di.writeU16(c) + + default: + c1, c2 := utf16.EncodeRune(c) + di.writeU16(c1) + di.writeU16(c2) + } + + i += size + } + + di.w.WriteByte(quote) + return nil +} + +func (di *diagnose) encodeFloat(ai byte, val uint64) error { + f64 := float64(0) + switch ai { + case additionalInformationAsFloat16: + f16 := float16.Frombits(uint16(val)) + switch { + case f16.IsNaN(): + di.w.WriteString("NaN") + return nil + case f16.IsInf(1): + di.w.WriteString("Infinity") + return nil + case f16.IsInf(-1): + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f16.Float32()) + } + + case additionalInformationAsFloat32: + f32 := math.Float32frombits(uint32(val)) + switch { + case f32 != f32: + di.w.WriteString("NaN") + return nil + case f32 > math.MaxFloat32: + di.w.WriteString("Infinity") + return nil + case f32 < -math.MaxFloat32: + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f32) + } + + case additionalInformationAsFloat64: + f64 = math.Float64frombits(val) + switch { + case f64 
!= f64: + di.w.WriteString("NaN") + return nil + case f64 > math.MaxFloat64: + di.w.WriteString("Infinity") + return nil + case f64 < -math.MaxFloat64: + di.w.WriteString("-Infinity") + return nil + } + } + // Use ES6 number to string conversion which should match most JSON generators. + // Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585 + const bitSize = 64 + b := make([]byte, 0, 32) + if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) { + b = strconv.AppendFloat(b, f64, 'e', -1, bitSize) + // clean up e-09 to e-9 + n := len(b) + if n >= 4 && string(b[n-4:n-1]) == "e-0" { + b = append(b[:n-2], b[n-1]) + } + } else { + b = strconv.AppendFloat(b, f64, 'f', -1, bitSize) + } + + // add decimal point and trailing zero if needed + if bytes.IndexByte(b, '.') < 0 { + if i := bytes.IndexByte(b, 'e'); i < 0 { + b = append(b, '.', '0') + } else { + b = append(b[:i+2], b[i:]...) + b[i] = '.' + b[i+1] = '0' + } + } + + di.w.WriteString(string(b)) + + if di.dm.floatPrecisionIndicator { + switch ai { + case additionalInformationAsFloat16: + di.w.WriteString("_1") + return nil + + case additionalInformationAsFloat32: + di.w.WriteString("_2") + return nil + + case additionalInformationAsFloat64: + di.w.WriteString("_3") + return nil + } + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/doc.go b/vendor/github.com/fxamacker/cbor/v2/doc.go new file mode 100644 index 0000000000..23f68b984c --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/doc.go @@ -0,0 +1,129 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +/* +Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags, +Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding, +CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection. + +Encoding options allow "preferred serialization" by encoding integers and floats +to their smallest forms (e.g. float16) when values fit. + +Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller +and easier to use with structs. + +For example, "toarray" tag makes struct fields encode to CBOR array elements. And +"keyasint" makes a field encode to an element of CBOR map with specified int key. + +Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go + +# Basics + +The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start + +Function signatures identical to encoding/json include: + + Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode. + +Standard interfaces include: + + BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler. + +Custom encoding and decoding is possible by implementing standard interfaces for +user-defined Go types. + +Codec functions are available at package-level (using defaults options) or by +creating modes from options at runtime. + +"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode). + +EncMode and DecMode interfaces are created from EncOptions or DecOptions structs. + + em, err := cbor.EncOptions{...}.EncMode() + em, err := cbor.CanonicalEncOptions().EncMode() + em, err := cbor.CTAP2EncOptions().EncMode() + +Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of +modes won't accidentally change at runtime after they're created. 
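The decoding side is symmetric; a minimal sketch in the same style as the examples that follow, assuming data holds a CBOR-encoded map:

    // Create a reusable DecMode with immutable options, safe for concurrent use.
    dm, err := cbor.DecOptions{}.DecMode()

    // Use DecMode like encoding/json, with the same function signatures.
    var v map[string]interface{}
    err = dm.Unmarshal(data, &v)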
+ +Modes are intended to be reused and are safe for concurrent use. + +EncMode and DecMode Interfaces + + // EncMode interface uses immutable options and is safe for concurrent use. + type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions // returns copy of options + } + + // DecMode interface uses immutable options and is safe for concurrent use. + type DecMode interface { + Unmarshal(data []byte, v interface{}) error + NewDecoder(r io.Reader) *Decoder + DecOptions() DecOptions // returns copy of options + } + +Using Default Encoding Mode + + b, err := cbor.Marshal(v) + + encoder := cbor.NewEncoder(w) + err = encoder.Encode(v) + +Using Default Decoding Mode + + err := cbor.Unmarshal(b, &v) + + decoder := cbor.NewDecoder(r) + err = decoder.Decode(&v) + +Creating and Using Encoding Modes + + // Create EncOptions using either struct literal or a function. + opts := cbor.CanonicalEncOptions() + + // If needed, modify encoding options + opts.Time = cbor.TimeUnix + + // Create reusable EncMode interface with immutable options, safe for concurrent use. + em, err := opts.EncMode() + + // Use EncMode like encoding/json, with same function signatures. + b, err := em.Marshal(v) + // or + encoder := em.NewEncoder(w) + err := encoder.Encode(v) + + // NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options + // specified during creation of em (encoding mode). + +# CBOR Options + +Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options + +Encoding Options: https://github.com/fxamacker/cbor#encoding-options + +Decoding Options: https://github.com/fxamacker/cbor#decoding-options + +# Struct Tags + +Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected. +If both struct tags are specified then `cbor` is used. + +Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use +very compact formats like COSE and CWT (CBOR Web Tokens) with structs. + +For example, "toarray" makes struct fields encode to array elements. And "keyasint" +makes struct fields encode to elements of CBOR map with int keys. + +https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png + +Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1 + +# Tests and Fuzzing + +Over 375 tests are included in this package. Cover-guided fuzzing is handled by +a private fuzzer that replaced fxamacker/cbor-fuzz years ago. +*/ +package cbor diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go new file mode 100644 index 0000000000..6508e291d6 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -0,0 +1,1989 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "github.com/x448/float16" +) + +// Marshal returns the CBOR encoding of v using default encoding options. +// See EncOptions for encoding options. +// +// Marshal uses the following encoding rules: +// +// If value implements the Marshaler interface, Marshal calls its +// MarshalCBOR method. +// +// If value implements encoding.BinaryMarshaler, Marshal calls its +// MarshalBinary method and encodes it as a CBOR byte string.
+// +// Boolean values encode as CBOR booleans (type 7). +// +// Positive integer values encode as CBOR positive integers (type 0). +// +// Negative integer values encode as CBOR negative integers (type 1). +// +// Floating point values encode as CBOR floating points (type 7). +// +// String values encode as CBOR text strings (type 3). +// +// []byte values encode as CBOR byte strings (type 2). +// +// Array and slice values encode as CBOR arrays (type 4). +// +// Map values encode as CBOR maps (type 5). +// +// Struct values encode as CBOR maps (type 5). Each exported struct field +// becomes a pair with field name encoded as CBOR text string (type 3) and +// field value encoded based on its type. See struct tag option "keyasint" +// to encode field name as CBOR integer (type 0 and 1). Also see struct +// tag option "toarray" for special field "_" to encode struct values as +// CBOR array (type 4). +// +// Marshal supports format string stored under the "cbor" key in the struct +// field's tag. CBOR format string can specify the name of the field, +// "omitempty" and "keyasint" options, and special case "-" for field omission. +// If "cbor" key is absent, Marshal uses "json" key. +// +// Struct field name is treated as integer if it has "keyasint" option in +// its format string. The format string must specify an integer as its +// field name. +// +// Special struct field "_" is used to specify struct level options, such as +// "toarray". "toarray" option enables Go struct to be encoded as CBOR array. +// "omitempty" is disabled by "toarray" to ensure that the same number +// of elements are encoded every time. +// +// Anonymous struct fields are marshaled as if their exported fields +// were fields in the outer struct. Marshal follows the same struct fields +// visibility rules used by JSON encoding package. +// +// time.Time values encode as text strings specified in RFC3339 or numerical +// representation of seconds since January 1, 1970 UTC depending on +// EncOptions.Time setting. Also See EncOptions.TimeTag to encode +// time.Time as CBOR tag with tag number 0 or 1. +// +// big.Int values encode as CBOR integers (type 0 and 1) if values fit. +// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See +// EncOptions.BigIntConvert to always encode big.Int values as CBOR +// bignums. +// +// Pointer values encode as the value pointed to. +// +// Interface values encode as the value stored in the interface. +// +// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7). +// +// Values of other types cannot be encoded in CBOR. Attempting +// to encode such a value causes Marshal to return an UnsupportedTypeError. +func Marshal(v interface{}) ([]byte, error) { + return defaultEncMode.Marshal(v) +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses default encoding options. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + return defaultEncMode.MarshalToBuffer(v, buf) +} + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid CBOR. +type Marshaler interface { + MarshalCBOR() ([]byte, error) +} + +// MarshalerError represents error from checking encoded CBOR data item +// returned from MarshalCBOR for well-formedness and some very limited tag validation. 
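The struct tag rules documented for Marshal above are easiest to see in miniature. A small, self-contained sketch using only Marshal and the documented "keyasint", "toarray", and "omitempty" options (the type names are illustrative):

    package main

    import (
        "fmt"

        "github.com/fxamacker/cbor/v2"
    )

    // Claims uses "keyasint" so field names encode as integer map keys,
    // which keeps COSE/CWT-style payloads compact.
    type Claims struct {
        Issuer  string `cbor:"1,keyasint"`
        Subject string `cbor:"2,keyasint,omitempty"`
    }

    // Point uses the special "_" field with "toarray" so the struct
    // encodes as a fixed-length CBOR array instead of a map.
    type Point struct {
        _ struct{} `cbor:",toarray"`
        X int
        Y int
    }

    func main() {
        b1, _ := cbor.Marshal(Claims{Issuer: "example"}) // Subject is omitted via omitempty
        b2, _ := cbor.Marshal(Point{X: 1, Y: 2})         // encodes as the CBOR array [1, 2]
        fmt.Printf("%x\n%x\n", b1, b2)
    }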
+type MarshalerError struct { + typ reflect.Type + err error +} + +func (e *MarshalerError) Error() string { + return "cbor: error calling MarshalCBOR for type " + + e.typ.String() + + ": " + e.err.Error() +} + +func (e *MarshalerError) Unwrap() error { + return e.err +} + +// UnsupportedTypeError is returned by Marshal when attempting to encode value +// of an unsupported type. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "cbor: unsupported type: " + e.Type.String() +} + +// UnsupportedValueError is returned by Marshal when attempting to encode an +// unsupported value. +type UnsupportedValueError struct { + msg string +} + +func (e *UnsupportedValueError) Error() string { + return "cbor: unsupported value: " + e.msg +} + +// SortMode identifies supported sorting order. +type SortMode int + +const ( + // SortNone encodes map pairs and struct fields in an arbitrary order. + SortNone SortMode = 0 + + // SortLengthFirst causes map keys or struct fields to be sorted such that: + // - If two keys have different lengths, the shorter one sorts earlier; + // - If two keys have the same length, the one with the lower value in + // (byte-wise) lexical order sorts earlier. + // It is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortLengthFirst SortMode = 1 + + // SortBytewiseLexical causes map keys or struct fields to be sorted in the + // bytewise lexicographic order of their deterministic CBOR encodings. + // It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding" + // in RFC 7049bis. + SortBytewiseLexical SortMode = 2 + + // SortShuffle encodes map pairs and struct fields in a shuffled + // order. This mode does not guarantee an unbiased permutation, but it + // does guarantee that the runtime of the shuffle algorithm used will be + // constant. + SortFastShuffle SortMode = 3 + + // SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortCanonical SortMode = SortLengthFirst + + // SortCTAP2 is used in "CTAP2 Canonical CBOR". + SortCTAP2 SortMode = SortBytewiseLexical + + // SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis. + SortCoreDeterministic SortMode = SortBytewiseLexical + + maxSortMode SortMode = 4 +) + +func (sm SortMode) valid() bool { + return sm >= 0 && sm < maxSortMode +} + +// StringMode specifies how to encode Go string values. +type StringMode int + +const ( + // StringToTextString encodes Go string to CBOR text string (major type 3). + StringToTextString StringMode = iota + + // StringToByteString encodes Go string to CBOR byte string (major type 2). + StringToByteString +) + +func (st StringMode) cborType() (cborType, error) { + switch st { + case StringToTextString: + return cborTypeTextString, nil + + case StringToByteString: + return cborTypeByteString, nil + } + return 0, errors.New("cbor: invalid StringType " + strconv.Itoa(int(st))) +} + +// ShortestFloatMode specifies which floating-point format should +// be used as the shortest possible format for CBOR encoding. +// It is not used for encoding Infinity and NaN values. +type ShortestFloatMode int + +const ( + // ShortestFloatNone makes float values encode without any conversion. + // This is the default for ShortestFloatMode in v1. + // E.g. a float32 in Go will encode to CBOR float32. And + // a float64 in Go will encode to CBOR float64. + ShortestFloatNone ShortestFloatMode = iota + + // ShortestFloat16 specifies float16 as the shortest form that preserves value. + // E.g. 
if float64 can convert to float32 while preserving value, then + // encoding will also try to convert float32 to float16. So a float64 might + // encode as CBOR float64, float32 or float16 depending on the value. + ShortestFloat16 + + maxShortestFloat +) + +func (sfm ShortestFloatMode) valid() bool { + return sfm >= 0 && sfm < maxShortestFloat +} + +// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type NaNConvertMode int + +const ( + // NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00). + NaNConvert7e00 NaNConvertMode = iota + + // NaNConvertNone never modifies or converts NaN to other representations + // (float64 NaN stays float64, etc. even if it can use float16 without losing + // any bits). + NaNConvertNone + + // NaNConvertPreserveSignal converts NaN to the smallest form that preserves + // value (quiet bit + payload) as described in RFC 7049bis Draft 12. + NaNConvertPreserveSignal + + // NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves + // NaN payload. + NaNConvertQuiet + + // NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value. + NaNConvertReject + + maxNaNConvert +) + +func (ncm NaNConvertMode) valid() bool { + return ncm >= 0 && ncm < maxNaNConvert +} + +// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type InfConvertMode int + +const ( + // InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16). + InfConvertFloat16 InfConvertMode = iota + + // InfConvertNone never converts (used by CTAP2 Canonical CBOR). + InfConvertNone + + // InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value. + InfConvertReject + + maxInfConvert +) + +func (icm InfConvertMode) valid() bool { + return icm >= 0 && icm < maxInfConvert +} + +// TimeMode specifies how to encode time.Time values. +type TimeMode int + +const ( + // TimeUnix causes time.Time to be encoded as epoch time in integer with second precision. + TimeUnix TimeMode = iota + + // TimeUnixMicro causes time.Time to be encoded as epoch time in float-point rounded to microsecond precision. + TimeUnixMicro + + // TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds, + // otherwise float-point rounded to microsecond precision. + TimeUnixDynamic + + // TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision. + TimeRFC3339 + + // TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision. + TimeRFC3339Nano + + maxTimeMode +) + +func (tm TimeMode) valid() bool { + return tm >= 0 && tm < maxTimeMode +} + +// BigIntConvertMode specifies how to encode big.Int values. +type BigIntConvertMode int + +const ( + // BigIntConvertShortest makes big.Int encode to CBOR integer if value fits. + // E.g. if big.Int value can be converted to CBOR integer while preserving + // value, encoder will encode it to CBOR integer (major type 0 or 1). + BigIntConvertShortest BigIntConvertMode = iota + + // BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without + // converting it to another CBOR type. + BigIntConvertNone + + // BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int. 
+ BigIntConvertReject + + maxBigIntConvert +) + +func (bim BigIntConvertMode) valid() bool { + return bim >= 0 && bim < maxBigIntConvert +} + +// NilContainersMode specifies how to encode nil slices and maps. +type NilContainersMode int + +const ( + // NilContainerAsNull encodes nil slices and maps as CBOR null. + // This is the default. + NilContainerAsNull NilContainersMode = iota + + // NilContainerAsEmpty encodes nil slices and maps as + // empty container (CBOR bytestring, array, or map). + NilContainerAsEmpty + + maxNilContainersMode +) + +func (m NilContainersMode) valid() bool { + return m >= 0 && m < maxNilContainersMode +} + +// OmitEmptyMode specifies how to encode struct fields with omitempty tag. +// The default behavior omits if field value would encode as empty CBOR value. +type OmitEmptyMode int + +const ( + // OmitEmptyCBORValue specifies that struct fields tagged with "omitempty" + // should be omitted from encoding if the field would be encoded as an empty + // CBOR value, such as CBOR false, 0, 0.0, nil, empty byte, empty string, + // empty array, or empty map. + OmitEmptyCBORValue OmitEmptyMode = iota + + // OmitEmptyGoValue specifies that struct fields tagged with "omitempty" + // should be omitted from encoding if the field has an empty Go value, + // defined as false, 0, 0.0, a nil pointer, a nil interface value, and + // any empty array, slice, map, or string. + // This behavior is the same as the current (aka v1) encoding/json package + // included in Go. + OmitEmptyGoValue + + maxOmitEmptyMode +) + +func (om OmitEmptyMode) valid() bool { + return om >= 0 && om < maxOmitEmptyMode +} + +// FieldNameMode specifies the CBOR type to use when encoding struct field names. +type FieldNameMode int + +const ( + // FieldNameToTextString encodes struct fields to CBOR text string (major type 3). + FieldNameToTextString FieldNameMode = iota + + // FieldNameToTextString encodes struct fields to CBOR byte string (major type 2). + FieldNameToByteString + + maxFieldNameMode +) + +func (fnm FieldNameMode) valid() bool { + return fnm >= 0 && fnm < maxFieldNameMode +} + +// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23) +// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will +// always encode unmodified bytes from the byte slice and just wrap it within +// CBOR tag 21, 22, or 23 if specified. +// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. +type ByteSliceLaterFormatMode int + +const ( + // ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // without adding CBOR tag 21, 22, or 23. + ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota + + // ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2). + ByteSliceLaterFormatBase64URL + + // ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2). + ByteSliceLaterFormatBase64 + + // ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2). 
+ ByteSliceLaterFormatBase16 +) + +func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) { + switch bsefm { + case ByteSliceLaterFormatNone: + return 0, nil + + case ByteSliceLaterFormatBase64URL: + return tagNumExpectedLaterEncodingBase64URL, nil + + case ByteSliceLaterFormatBase64: + return tagNumExpectedLaterEncodingBase64, nil + + case ByteSliceLaterFormatBase16: + return tagNumExpectedLaterEncodingBase16, nil + } + return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm))) +} + +// ByteArrayMode specifies how to encode byte arrays. +type ByteArrayMode int + +const ( + // ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical + // length and contents is encoded. + ByteArrayToByteSlice ByteArrayMode = iota + + // ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer + // item for each byte in the array. + ByteArrayToArray + + maxByteArrayMode +) + +func (bam ByteArrayMode) valid() bool { + return bam >= 0 && bam < maxByteArrayMode +} + +// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler. +type BinaryMarshalerMode int + +const ( + // BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string. + BinaryMarshalerByteString BinaryMarshalerMode = iota + + // BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode. + BinaryMarshalerNone + + maxBinaryMarshalerMode +) + +func (bmm BinaryMarshalerMode) valid() bool { + return bmm >= 0 && bmm < maxBinaryMarshalerMode +} + +// EncOptions specifies encoding options. +type EncOptions struct { + // Sort specifies sorting order. + Sort SortMode + + // ShortestFloat specifies the shortest floating-point encoding that preserves + // the value being encoded. + ShortestFloat ShortestFloatMode + + // NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode. + NaNConvert NaNConvertMode + + // InfConvert specifies how to encode Inf and it overrides ShortestFloatMode. + InfConvert InfConvertMode + + // BigIntConvert specifies how to encode big.Int values. + BigIntConvert BigIntConvertMode + + // Time specifies how to encode time.Time. + Time TimeMode + + // TimeTag allows time.Time to be encoded with a tag number. + // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. + TimeTag EncTagMode + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // NilContainers specifies how to encode nil slices and maps. + NilContainers NilContainersMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // OmitEmptyMode specifies how to encode struct fields with omitempty tag. + OmitEmpty OmitEmptyMode + + // String specifies which CBOR type to use when encoding Go strings. + // - CBOR text string (major type 3) is default + // - CBOR byte string (major type 2) + String StringMode + + // FieldName specifies the CBOR type to use when encoding struct field names. + FieldName FieldNameMode + + // ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23) + // to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will + // always encode unmodified bytes from the byte slice and just wrap it within + // CBOR tag 21, 22, or 23 if specified. + // See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. 
+ ByteSliceLaterFormat ByteSliceLaterFormatMode + + // ByteArray specifies how to encode byte arrays. + ByteArray ByteArrayMode + + // BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler. + BinaryMarshaler BinaryMarshalerMode +} + +// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding, +// defined in RFC 7049 Section 3.9 with the following rules: +// +// 1. "Integers must be as small as possible." +// 2. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 3. The keys in every map must be sorted in length-first sorting order. +// See SortLengthFirst for details. +// 4. "Indefinite-length items must be made into definite-length items." +// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might +// need to be added. One example rule might be to have all floats start as a 64-bit +// float, then do a test conversion to a 32-bit float; if the result is the same numeric +// value, use the shorter value and repeat the process with a test conversion to a +// 16-bit float. (This rule selects 16-bit float for positive and negative Infinity +// as well.) Also, there are many representations for NaN. If NaN is an allowed value, +// it must always be represented as 0xf97e00." +func CanonicalEncOptions() EncOptions { + return EncOptions{ + Sort: SortCanonical, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding, +// defined in CTAP specification, with the following rules: +// +// 1. "Integers must be encoded as small as possible." +// 2. "The representations of any floating-point values are not changed." +// 3. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 4. "Indefinite-length items must be made into definite-length items."" +// 5. The keys in every map must be sorted in bytewise lexicographic order. +// See SortBytewiseLexical for details. +// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present." +func CTAP2EncOptions() EncOptions { + return EncOptions{ + Sort: SortCTAP2, + ShortestFloat: ShortestFloatNone, + NaNConvert: NaNConvertNone, + InfConvert: InfConvertNone, + IndefLength: IndefLengthForbidden, + TagsMd: TagsForbidden, + } +} + +// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "Preferred serialization MUST be used. In particular, this means that arguments +// (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST +// be as short as possible" +// "Floating point values also MUST use the shortest form that preserves the value" +// 2. "Indefinite-length items MUST NOT appear." +// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of +// their deterministic encodings." +func CoreDetEncOptions() EncOptions { + return EncOptions{ + Sort: SortCoreDeterministic, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "The preferred serialization always uses the shortest form of representing the argument +// (Section 3);" +// 2. 
"it also uses the shortest floating-point encoding that preserves the value being +// encoded (see Section 5.5)." +// "The preferred encoding for a floating-point value is the shortest floating-point encoding +// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the +// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter +// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding +// the shorter significand towards the right reconstitutes the original NaN value (for many +// applications, the single NaN encoding 0xf97e00 will suffice)." +// 3. "Definite length encoding is preferred whenever the length is known at the time the +// serialization of the item starts." +func PreferredUnsortedEncOptions() EncOptions { + return EncOptions{ + Sort: SortNone, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + } +} + +// EncMode returns EncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithTags(tags) +} + +// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.EncTag != EncTagNone { + ts[contentType] = tag + } + } + syncTags.RUnlock() + if len(ts) > 0 { + em.tags = ts + } + return em, nil +} + +// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency). +func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithSharedTags(tags) +} + +// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency). 
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + em.tags = tags + return em, nil +} + +func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.Sort.valid() { + return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort))) + } + if !opts.ShortestFloat.valid() { + return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat))) + } + if !opts.NaNConvert.valid() { + return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert))) + } + if !opts.InfConvert.valid() { + return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert))) + } + if !opts.BigIntConvert.valid() { + return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert))) + } + if !opts.Time.valid() { + return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time))) + } + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + if !opts.NilContainers.valid() { + return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers))) + } + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired { + return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired") + } + if !opts.OmitEmpty.valid() { + return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty))) + } + stringMajorType, err := opts.String.cborType() + if err != nil { + return nil, err + } + if !opts.FieldName.valid() { + return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName))) + } + byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag() + if err != nil { + return nil, err + } + if !opts.ByteArray.valid() { + return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray))) + } + if !opts.BinaryMarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler))) + } + em := encMode{ + sort: opts.Sort, + shortestFloat: opts.ShortestFloat, + nanConvert: opts.NaNConvert, + infConvert: opts.InfConvert, + bigIntConvert: opts.BigIntConvert, + time: opts.Time, + timeTag: opts.TimeTag, + indefLength: opts.IndefLength, + nilContainers: opts.NilContainers, + tagsMd: opts.TagsMd, + omitEmpty: opts.OmitEmpty, + stringType: opts.String, + stringMajorType: stringMajorType, + fieldName: opts.FieldName, + byteSliceLaterFormat: opts.ByteSliceLaterFormat, + byteSliceLaterEncodingTag: byteSliceLaterEncodingTag, + byteArray: opts.ByteArray, + binaryMarshaler: opts.BinaryMarshaler, + } + return &em, nil +} + +// EncMode is the main interface for CBOR encoding. 
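A minimal end-to-end sketch of the options-to-mode flow defined above, using only identifiers from this file:

    package main

    import (
        "fmt"

        "github.com/fxamacker/cbor/v2"
    )

    func main() {
        // Start from Core Deterministic Encoding defaults: sorted map keys,
        // shortest integer/float forms, no indefinite lengths.
        opts := cbor.CoreDetEncOptions()
        opts.Time = cbor.TimeUnix // adjust a single option before freezing it

        em, err := opts.EncMode() // immutable; safe to share across goroutines
        if err != nil {
            panic(err)
        }

        b, err := em.Marshal(map[string]int{"b": 2, "a": 1})
        if err != nil {
            panic(err)
        }
        fmt.Printf("%x\n", b) // map keys are emitted in deterministic order
    }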
+type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions +} + +// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by +// adding MarshalToBuffer to support user specified buffer rather than encoding +// into the built-in buffer pool. +type UserBufferEncMode interface { + EncMode + MarshalToBuffer(v interface{}, buf *bytes.Buffer) error + + // This private method is to prevent users implementing + // this interface and so future additions to it will + // not be breaking changes. + // See https://go.dev/blog/module-compatibility + unexport() +} + +type encMode struct { + tags tagProvider + sort SortMode + shortestFloat ShortestFloatMode + nanConvert NaNConvertMode + infConvert InfConvertMode + bigIntConvert BigIntConvertMode + time TimeMode + timeTag EncTagMode + indefLength IndefLengthMode + nilContainers NilContainersMode + tagsMd TagsMode + omitEmpty OmitEmptyMode + stringType StringMode + stringMajorType cborType + fieldName FieldNameMode + byteSliceLaterFormat ByteSliceLaterFormatMode + byteSliceLaterEncodingTag uint64 + byteArray ByteArrayMode + binaryMarshaler BinaryMarshalerMode +} + +var defaultEncMode, _ = EncOptions{}.encMode() + +// These four decoding modes are used by getMarshalerDecMode. +// maxNestedLevels, maxArrayElements, and maxMapPairs are +// set to max allowed limits to avoid rejecting Marshaler +// output that would have been the allowable output of a +// non-Marshaler object that exceeds default limits. +var ( + marshalerForbidIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsForbidden, + } + + marshalerAllowIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsForbidden, + } + + marshalerForbidIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsAllowed, + } + + marshalerAllowIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsAllowed, + } +) + +// getMarshalerDecMode returns one of four existing decoding modes +// which can be reused (safe for parallel use) for the purpose of +// checking if data returned by Marshaler is well-formed. +func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode { + switch { + case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed: + return &marshalerAllowIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden: + return &marshalerAllowIndefLengthForbidTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed: + return &marshalerForbidIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden: + return &marshalerForbidIndefLengthForbidTagsDecMode + + default: + // This should never happen, unless we add new options to + // IndefLengthMode or TagsMode without updating this function. 
+ return &decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: indefLength, + tagsMd: tagsMd, + } + } +} + +// EncOptions returns user specified options used to create this EncMode. +func (em *encMode) EncOptions() EncOptions { + return EncOptions{ + Sort: em.sort, + ShortestFloat: em.shortestFloat, + NaNConvert: em.nanConvert, + InfConvert: em.infConvert, + BigIntConvert: em.bigIntConvert, + Time: em.time, + TimeTag: em.timeTag, + IndefLength: em.indefLength, + NilContainers: em.nilContainers, + TagsMd: em.tagsMd, + OmitEmpty: em.omitEmpty, + String: em.stringType, + FieldName: em.fieldName, + ByteSliceLaterFormat: em.byteSliceLaterFormat, + ByteArray: em.byteArray, + BinaryMarshaler: em.binaryMarshaler, + } +} + +func (em *encMode) unexport() {} + +func (em *encMode) encTagBytes(t reflect.Type) []byte { + if em.tags != nil { + if tagItem := em.tags.getTagItemFromType(t); tagItem != nil { + return tagItem.cborTagNum + } + } + return nil +} + +// Marshal returns the CBOR encoding of v using em encoding mode. +// +// See the documentation for Marshal for details. +func (em *encMode) Marshal(v interface{}) ([]byte, error) { + e := getEncodeBuffer() + + if err := encode(e, em, reflect.ValueOf(v)); err != nil { + putEncodeBuffer(e) + return nil, err + } + + buf := make([]byte, e.Len()) + copy(buf, e.Bytes()) + + putEncodeBuffer(e) + return buf, nil +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses em encoding mode. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + if buf == nil { + return fmt.Errorf("cbor: encoding buffer provided by user is nil") + } + return encode(buf, em, reflect.ValueOf(v)) +} + +// NewEncoder returns a new encoder that writes to w using em EncMode. +func (em *encMode) NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, em: em} +} + +// encodeBufferPool caches unused bytes.Buffer objects for later reuse. 
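Marshal above copies its result out of a pooled buffer before returning it, whereas MarshalToBuffer writes into a buffer the caller owns; a minimal sketch of the user-buffer path:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/fxamacker/cbor/v2"
    )

    func main() {
        em, err := cbor.EncOptions{}.UserBufferEncMode()
        if err != nil {
            panic(err)
        }

        var buf bytes.Buffer // owned and reused by the caller, not the package's pool
        if err := em.MarshalToBuffer([]int{1, 2, 3}, &buf); err != nil {
            panic(err)
        }
        fmt.Printf("%x\n", buf.Bytes()) // 83010203
    }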
+var encodeBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(32) // TODO: make this configurable + return e + }, +} + +func getEncodeBuffer() *bytes.Buffer { + return encodeBufferPool.Get().(*bytes.Buffer) +} + +func putEncodeBuffer(e *bytes.Buffer) { + e.Reset() + encodeBufferPool.Put(e) +} + +type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error +type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error) + +func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if !v.IsValid() { + // v is zero value + e.Write(cborNil) + return nil + } + vt := v.Type() + f, _ := getEncodeFunc(vt) + if f == nil { + return &UnsupportedTypeError{vt} + } + + return f(e, em, v) +} + +func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + b := cborFalse + if v.Bool() { + b = cborTrue + } + e.Write(b) + return nil +} + +func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + i := v.Int() + if i >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(i)) + return nil + } + i = i*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(i)) + return nil +} + +func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypePositiveInt), v.Uint()) + return nil +} + +func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + f64 := v.Float() + if math.IsNaN(f64) { + return encodeNaN(e, em, v) + } + if math.IsInf(f64, 0) { + return encodeInf(e, em, v) + } + fopt := em.shortestFloat + if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) { + // Encode float64 + // Don't use encodeFloat64() because it cannot be inlined. + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64) + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil + } + + f32 := float32(f64) + if fopt == ShortestFloat16 { + var f16 float16.Float16 + p := float16.PrecisionFromfloat32(f32) + if p == float16.PrecisionExact { + // Roundtrip float32->float16->float32 test isn't needed. + f16 = float16.Fromfloat32(f32) + } else if p == float16.PrecisionUnknown { + // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. + f16 = float16.Fromfloat32(f32) + if f16.Float32() == f32 { + p = float16.PrecisionExact + } + } + if p == float16.PrecisionExact { + // Encode float16 + // Don't use encodeFloat16() because it cannot be inlined. + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil + } + } + + // Encode float32 + // Don't use encodeFloat32() because it cannot be inlined. 
+ const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + f64 := v.Float() + switch em.infConvert { + case InfConvertReject: + return &UnsupportedValueError{msg: "floating-point infinity"} + + case InfConvertFloat16: + if f64 > 0 { + e.Write(cborPositiveInfinity) + } else { + e.Write(cborNegativeInfinity) + } + return nil + } + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, f64) + } + return encodeFloat32(e, float32(f64)) +} + +func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error { + switch em.nanConvert { + case NaNConvert7e00: + e.Write(cborNaN) + return nil + + case NaNConvertNone: + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, v.Float()) + } + f32 := float32NaNFromReflectValue(v) + return encodeFloat32(e, f32) + + case NaNConvertReject: + return &UnsupportedValueError{msg: "floating-point NaN"} + + default: // NaNConvertPreserveSignal, NaNConvertQuiet + if v.Kind() == reflect.Float64 { + f64 := v.Float() + f64bits := math.Float64bits(f64) + if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 { + f64bits |= 1 << 51 // Set quiet bit = 1 + f64 = math.Float64frombits(f64bits) + } + // The lower 29 bits are dropped when converting from float64 to float32. + if f64bits&0x1fffffff != 0 { + // Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s. + return encodeFloat64(e, f64) + } + // Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits. + sign := uint32(f64bits>>32) & (1 << 31) + exp := uint32(0x7f800000) + coef := uint32((f64bits & 0xfffffffffffff) >> 29) + f32bits := sign | exp | coef + f32 := math.Float32frombits(f32bits) + // The lower 13 bits are dropped when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + // Encode NaN as float16 + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. + return encodeFloat16(e, f16) + } + + f32 := float32NaNFromReflectValue(v) + f32bits := math.Float32bits(f32) + if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 { + f32bits |= 1 << 22 // Set quiet bit = 1 + f32 = math.Float32frombits(f32bits) + } + // The lower 13 bits are dropped coef bits when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. 
+ return encodeFloat16(e, f16) + } +} + +func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error { + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat32(e *bytes.Buffer, f32 float32) error { + const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat64(e *bytes.Buffer, f64 float64) error { + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64 + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil +} + +func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + vk := v.Kind() + if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 { + encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag) + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + slen := v.Len() + if slen == 0 { + return e.WriteByte(byte(cborTypeByteString)) + } + encodeHead(e, byte(cborTypeByteString), uint64(slen)) + if vk == reflect.Array { + for i := 0; i < slen; i++ { + e.WriteByte(byte(v.Index(i).Uint())) + } + return nil + } + e.Write(v.Bytes()) + return nil +} + +func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + s := v.String() + encodeHead(e, byte(em.stringMajorType), uint64(len(s))) + e.WriteString(s) + return nil +} + +type arrayEncodeFunc struct { + f encodeFunc +} + +func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 { + return encodeByteString(e, em, v) + } + if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + alen := v.Len() + if alen == 0 { + return e.WriteByte(byte(cborTypeArray)) + } + encodeHead(e, byte(cborTypeArray), uint64(alen)) + for i := 0; i < alen; i++ { + if err := ae.f(e, em, v.Index(i)); err != nil { + return err + } + } + return nil +} + +// encodeKeyValueFunc encodes key/value pairs in map (v). +// If kvs is provided (having the same length as v), length of encoded key and value are stored in kvs. +// kvs is used for canonical encoding of map. 
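The key/value sorters above are what make the canonical modes deterministic: encoded pairs are rearranged in place before the map is finalized. A small sketch of the observable effect:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/fxamacker/cbor/v2"
    )

    func main() {
        em, err := cbor.CoreDetEncOptions().EncMode()
        if err != nil {
            panic(err)
        }

        m := map[string]int{"a": 1, "bb": 2, "c": 3}
        b1, _ := em.Marshal(m)
        b2, _ := em.Marshal(m)

        // Map iteration order is random in Go, but the bytewise-lexical sort
        // of encoded keys makes the serialized form stable.
        fmt.Println(bytes.Equal(b1, b2)) // true
    }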
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error + +type mapEncodeFunc struct { + e encodeKeyValueFunc +} + +func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + mlen := v.Len() + if mlen == 0 { + return e.WriteByte(byte(cborTypeMap)) + } + + encodeHead(e, byte(cborTypeMap), uint64(mlen)) + if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { + return me.e(e, em, v, nil) + } + + kvsp := getKeyValues(v.Len()) // for sorting keys + defer putKeyValues(kvsp) + kvs := *kvsp + + kvBeginOffset := e.Len() + if err := me.e(e, em, v, kvs); err != nil { + return err + } + kvTotalLen := e.Len() - kvBeginOffset + + // Use the capacity at the tail of the encode buffer as a staging area to rearrange the + // encoded pairs into sorted order. + e.Grow(kvTotalLen) + tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+. + dst := e.Bytes()[kvBeginOffset:] + + if em.sort == SortBytewiseLexical { + sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst}) + } else { + sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst}) + } + + // This is where the encoded bytes are actually rearranged in the output buffer to reflect + // the desired order. + sortedOffset := 0 + for _, kv := range kvs { + copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset]) + sortedOffset += kv.nextOffset - kv.offset + } + copy(dst, tmp[:kvTotalLen]) + + return nil + +} + +// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative +// to the first byte of the first encoded pair. +type keyValue struct { + offset int + valueOffset int + nextOffset int +} + +type bytewiseKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *bytewiseKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *bytewiseKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *bytewiseKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +type lengthFirstKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *lengthFirstKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *lengthFirstKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { + return keyLengthDifference < 0 + } + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +var keyValuePool = sync.Pool{} + +func getKeyValues(length int) *[]keyValue { + v := keyValuePool.Get() + if v == nil { + y := make([]keyValue, length) + return &y + } + x := v.(*[]keyValue) + if cap(*x) >= length { + *x = (*x)[:length] + return x + } + // []keyValue from the pool does not have enough capacity. + // Return it back to the pool and create a new one. 
+ keyValuePool.Put(x) + y := make([]keyValue, length) + return &y +} + +func putKeyValues(x *[]keyValue) { + *x = (*x)[:0] + keyValuePool.Put(x) +} + +func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + flds := structType.fields + + encodeHead(e, byte(cborTypeArray), uint64(len(flds))) + for i := 0; i < len(flds); i++ { + f := flds[i] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Write CBOR nil for null pointer to embedded struct + e.Write(cborNil) + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + } + return nil +} + +func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + flds := structType.getFields(em) + + start := 0 + if em.sort == SortFastShuffle && len(flds) > 0 { + start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting. + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + // Encode head with struct field count. + // Head is rewritten later if actual encoded field count is different from struct field count. + encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) + + kvbegin := e.Len() + kvcount := 0 + for offset := 0; offset < len(flds); offset++ { + f := flds[(start+offset)%len(flds)] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + if f.omitEmpty { + empty, err := f.ief(em, fv) + if err != nil { + return err + } + if empty { + continue + } + } + + if !f.keyAsInt && em.fieldName == FieldNameToByteString { + e.Write(f.cborNameByteString) + } else { // int or text string + e.Write(f.cborName) + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + + kvcount++ + } + + if len(flds) == kvcount { + // Encoded element count in head is the same as actual element count. + return nil + } + + // Overwrite the bytes that were reserved for the head before encoding the map entries. + var actualHeadLen int + { + headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + } + + if actualHeadLen == encodedHeadLen { + // The bytes reserved for the encoded head were exactly the right size, so the + // encoded entries are already in their final positions. + return nil + } + + // We reserved more bytes than needed for the encoded head, based on the number of fields + // encoded. The encoded entries are offset to the right by the number of excess reserved + // bytes. Shift the entries left to remove the gap. 
+ excessReservedBytes := encodedHeadLen - actualHeadLen + dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvbegin:e.Len()] + copy(dst, src) + + // After shifting, the excess bytes are at the end of the output buffer and they are + // garbage. + e.Truncate(e.Len() - excessReservedBytes) + return nil +} + +func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() { + e.Write(cborNil) + return nil + } + return encode(e, em, v.Elem()) +} + +func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { + t := v.Interface().(time.Time) + if t.IsZero() { + e.Write(cborNil) // Even if tag is required, encode as CBOR null. + return nil + } + if em.timeTag == EncTagRequired { + tagNumber := 1 + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + tagNumber = 0 + } + encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) + } + switch em.time { + case TimeUnix: + secs := t.Unix() + return encodeInt(e, em, reflect.ValueOf(secs)) + + case TimeUnixMicro: + t = t.UTC().Round(time.Microsecond) + f := float64(t.UnixNano()) / 1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeUnixDynamic: + t = t.UTC().Round(time.Microsecond) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + if nsecs == 0 { + return encodeInt(e, em, reflect.ValueOf(secs)) + } + f := float64(secs) + float64(nsecs)/1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeRFC3339: + s := t.Format(time.RFC3339) + return encodeString(e, em, reflect.ValueOf(s)) + + default: // TimeRFC3339Nano + s := t.Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + } +} + +func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.bigIntConvert == BigIntConvertReject { + return &UnsupportedTypeError{Type: typeBigInt} + } + + vbi := v.Interface().(big.Int) + sign := vbi.Sign() + bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v + if sign < 0 { + // For negative number, convert to CBOR encoded number (-v-1). 
+ bi.Sub(bi, big.NewInt(1)) + } + + if em.bigIntConvert == BigIntConvertShortest { + if bi.IsUint64() { + if sign >= 0 { + // Encode as CBOR pos int (major type 0) + encodeHead(e, byte(cborTypePositiveInt), bi.Uint64()) + return nil + } + // Encode as CBOR neg int (major type 1) + encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64()) + return nil + } + } + + tagNum := 2 + if sign < 0 { + tagNum = 3 + } + // Write tag number + encodeHead(e, byte(cborTypeTag), uint64(tagNum)) + // Write bignum byte string + b := bi.Bytes() + encodeHead(e, byte(cborTypeByteString), uint64(len(b))) + e.Write(b) + return nil +} + +type binaryMarshalerEncoder struct { + alternateEncode encodeFunc + alternateIsEmpty isEmptyFunc +} + +func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateEncode(e, em, v) + } + + vt := v.Type() + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(vt) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return err + } + if b := em.encTagBytes(vt); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypeByteString), uint64(len(data))) + e.Write(data) + return nil +} + +func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateIsEmpty(em, v) + } + + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return false, err + } + return len(data) == 0, nil +} + +func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden && v.Type() == typeRawTag { + return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden") + } + m, ok := v.Interface().(Marshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(Marshaler) + } + data, err := m.MarshalCBOR() + if err != nil { + return err + } + + // Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3. + d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)} + err = d.wellformed(false, true) + if err != nil { + return &MarshalerError{typ: v.Type(), err: err} + } + + e.Write(data) + return nil +} + +func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden { + return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden") + } + + t := v.Interface().(Tag) + + if t.Number == 0 && t.Content == nil { + // Marshal uninitialized cbor.Tag + e.Write(cborNil) + return nil + } + + // Marshal tag number + encodeHead(e, byte(cborTypeTag), t.Number) + + vem := *em // shallow copy + + // For built-in tags, disable settings that may introduce tag validity errors when + // marshaling certain Content values. + switch t.Number { + case tagNumRFC3339Time: + vem.stringType = StringToTextString + vem.stringMajorType = cborTypeTextString + case tagNumUnsignedBignum, tagNumNegativeBignum: + vem.byteSliceLaterFormat = ByteSliceLaterFormatNone + vem.byteSliceLaterEncodingTag = 0 + } + + // Marshal tag content + return encode(e, &vem, reflect.ValueOf(t.Content)) +} + +// encodeHead writes CBOR head of specified type t and returns number of bytes written. 
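+// The head is the initial byte (major type in the high 3 bits, additional information in the low
+// 5 bits) followed by an argument of 0, 1, 2, 4, or 8 bytes, whichever is the shortest that fits n.
+// For example (illustrative): encodeHead(e, byte(cborTypeArray), 23) writes the single byte 0x97,
+// while n = 500 produces the 3-byte head 0x99 0x01 0xf4.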
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int { + if n <= maxAdditionalInformationWithoutArgument { + const headSize = 1 + e.WriteByte(t | byte(n)) + return headSize + } + + if n <= math.MaxUint8 { + const headSize = 2 + scratch := [headSize]byte{ + t | byte(additionalInformationWith1ByteArgument), + byte(n), + } + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint16 { + const headSize = 3 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith2ByteArgument) + binary.BigEndian.PutUint16(scratch[1:], uint16(n)) + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint32 { + const headSize = 5 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith4ByteArgument) + binary.BigEndian.PutUint32(scratch[1:], uint32(n)) + e.Write(scratch[:]) + return headSize + } + + const headSize = 9 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith8ByteArgument) + binary.BigEndian.PutUint64(scratch[1:], n) + e.Write(scratch[:]) + return headSize +} + +var ( + typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() + typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + typeRawMessage = reflect.TypeOf(RawMessage(nil)) + typeByteString = reflect.TypeOf(ByteString("")) +) + +func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) { + k := t.Kind() + if k == reflect.Ptr { + return getEncodeIndirectValueFunc(t), isEmptyPtr + } + switch t { + case typeSimpleValue: + return encodeMarshalerType, isEmptyUint + + case typeTag: + return encodeTag, alwaysNotEmpty + + case typeTime: + return encodeTime, alwaysNotEmpty + + case typeBigInt: + return encodeBigInt, alwaysNotEmpty + + case typeRawMessage: + return encodeMarshalerType, isEmptySlice + + case typeByteString: + return encodeMarshalerType, isEmptyString + } + if reflect.PtrTo(t).Implements(typeMarshaler) { + return encodeMarshalerType, alwaysNotEmpty + } + if reflect.PtrTo(t).Implements(typeBinaryMarshaler) { + defer func() { + // capture encoding method used for modes that disable BinaryMarshaler + bme := binaryMarshalerEncoder{ + alternateEncode: ef, + alternateIsEmpty: ief, + } + ef = bme.encode + ief = bme.isEmpty + }() + } + switch k { + case reflect.Bool: + return encodeBool, isEmptyBool + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt, isEmptyInt + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint, isEmptyUint + + case reflect.Float32, reflect.Float64: + return encodeFloat, isEmptyFloat + + case reflect.String: + return encodeString, isEmptyString + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return encodeByteString, isEmptySlice + } + fallthrough + + case reflect.Array: + f, _ := getEncodeFunc(t.Elem()) + if f == nil { + return nil, nil + } + return arrayEncodeFunc{f: f}.encode, isEmptySlice + + case reflect.Map: + f := getEncodeMapFunc(t) + if f == nil { + return nil, nil + } + return f, isEmptyMap + + case reflect.Struct: + // Get struct's special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + if hasToArrayOption(tag) { + return encodeStructToArray, isEmptyStruct + } + } + } + return encodeStruct, isEmptyStruct + + case reflect.Interface: + return encodeIntf, isEmptyIntf + } + return nil, nil +} + +func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + f, _ := 
getEncodeFunc(t) + if f == nil { + return nil + } + return func(e *bytes.Buffer, em *encMode, v reflect.Value) error { + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + if v.Kind() == reflect.Ptr && v.IsNil() { + e.Write(cborNil) + return nil + } + return f(e, em, v) + } +} + +func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) { + return false, nil +} + +func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) { + return !v.Bool(), nil +} + +func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) { + return v.Int() == 0, nil +} + +func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) { + return v.Uint() == 0, nil +} + +func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) { + return v.Float() == 0.0, nil +} + +func isEmptyString(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return false, err + } + + if em.omitEmpty == OmitEmptyGoValue { + return false, nil + } + + if structType.toArray { + return len(structType.fields) == 0, nil + } + + if len(structType.fields) > len(structType.omitEmptyFieldsIdx) { + return false, nil + } + + for _, i := range structType.omitEmptyFieldsIdx { + f := structType.fields[i] + + // Get field value + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + empty, err := f.ief(em, fv) + if err != nil { + return false, err + } + if !empty { + return false, nil + } + } + return true, nil +} + +func cannotFitFloat32(f64 float64) bool { + f32 := float32(f64) + return float64(f32) != f64 +} + +// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit. +func float32NaNFromReflectValue(v reflect.Value) float32 { + // Keith Randall's workaround for issue https://github.com/golang/go/issues/36400 + p := reflect.New(v.Type()) + p.Elem().Set(v) + f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32) + return f32 +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/vendor/github.com/fxamacker/cbor/v2/encode_map.go new file mode 100644 index 0000000000..8b4b4bbc59 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map.go @@ -0,0 +1,94 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
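+//
+// This variant of the map encoder (Go 1.20 and newer, per the build constraint below) recycles the
+// reflect.Value instances used for map iteration keys and values through sync.Pool and clears them
+// with reflect.Value.SetZero, which older toolchains lack; encode_map_go117.go holds the fallback.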
+ +//go:build go1.20 + +package cbor + +import ( + "bytes" + "reflect" + "sync" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc + kpool, vpool sync.Pool +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + iterk := me.kpool.Get().(*reflect.Value) + defer func() { + iterk.SetZero() + me.kpool.Put(iterk) + }() + iterv := me.vpool.Get().(*reflect.Value) + defer func() { + iterv.SetZero() + me.vpool.Put(iterv) + }() + + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + if err := me.kf(e, em, *iterk); err != nil { + return err + } + if err := me.ef(e, em, *iterv); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + offset := e.Len() + if err := me.kf(e, em, *iterk); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, *iterv); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{ + kf: kf, + ef: ef, + kpool: sync.Pool{ + New: func() interface{} { + rk := reflect.New(t.Key()).Elem() + return &rk + }, + }, + vpool: sync.Pool{ + New: func() interface{} { + rv := reflect.New(t.Elem()).Elem() + return &rv + }, + }, + } + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go new file mode 100644 index 0000000000..31c39336dd --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go @@ -0,0 +1,60 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build !go1.20 + +package cbor + +import ( + "bytes" + "reflect" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + offset := e.Len() + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef} + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go new file mode 100644 index 0000000000..de175cee4a --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -0,0 +1,69 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" +) + +// SimpleValue represents CBOR simple value. +// CBOR simple value is: +// - an extension point like CBOR tag. +// - a subset of CBOR major type 7 that isn't floating-point. +// - "identified by a number between 0 and 255, but distinct from that number itself". +// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key. +// +// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined". +// Other CBOR simple values are currently unassigned/reserved by IANA. +type SimpleValue uint8 + +var ( + typeSimpleValue = reflect.TypeOf(SimpleValue(0)) +) + +// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7). +func (sv SimpleValue) MarshalCBOR() ([]byte, error) { + // RFC 8949 3.3. Floating-Point Numbers and Values with No Content says: + // "An encoder MUST NOT issue two-byte sequences that start with 0xf8 + // (major type 7, additional information 24) and continue with a byte + // less than 0x20 (32 decimal). Such sequences are not well-formed. + // (This implies that an encoder cannot encode false, true, null, or + // undefined in two-byte sequences and that only the one-byte variants + // of these are well-formed; more generally speaking, each simple value + // only has a single representation variant)." + + switch { + case sv <= maxSimpleValueInAdditionalInformation: + return []byte{byte(cborTypePrimitives) | byte(sv)}, nil + + case sv >= minSimpleValueIn1ByteArgument: + return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil + + default: + return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)} + } +} + +// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue. 
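+// For example (illustrative): unmarshaling 0xf0 yields SimpleValue(16), and 0xf8 0x20 yields
+// SimpleValue(32); per the well-formedness rule quoted above, the two-byte form is only valid
+// for values 32..255.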
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error { + if sv == nil { + return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer") + } + + d := decoder{data: data, dm: defaultDecMode} + + typ, ai, val := d.getHead() + + if typ != cborTypePrimitives { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"} + } + if ai > additionalInformationWith1ByteArgument { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"} + } + + // It is safe to cast val to uint8 here because + // - data is already verified to be well-formed CBOR simple value and + // - val is <= math.MaxUint8. + *sv = SimpleValue(val) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go new file mode 100644 index 0000000000..507ab6c184 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/stream.go @@ -0,0 +1,277 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "errors" + "io" + "reflect" +) + +// Decoder reads and decodes CBOR values from io.Reader. +type Decoder struct { + r io.Reader + d decoder + buf []byte + off int // next read offset in buf + bytesRead int +} + +// NewDecoder returns a new decoder that reads and decodes from r using +// the default decoding options. +func NewDecoder(r io.Reader) *Decoder { + return defaultDecMode.NewDecoder(r) +} + +// Decode reads CBOR value and decodes it into the value pointed to by v. +func (dec *Decoder) Decode(v interface{}) error { + _, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.d.reset(dec.buf[dec.off:]) + err = dec.d.value(v) + + // Increment dec.off even if decoding err is not nil because + // dec.d.off points to the next CBOR data item if current + // CBOR data item is valid but failed to be decoded into v. + // This allows next CBOR data item to be decoded in next + // call to this function. + dec.off += dec.d.off + dec.bytesRead += dec.d.off + + return err +} + +// Skip skips to the next CBOR data item (if there is any), +// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc. +func (dec *Decoder) Skip() error { + n, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.off += n + dec.bytesRead += n + return nil +} + +// NumBytesRead returns the number of bytes read. +func (dec *Decoder) NumBytesRead() int { + return dec.bytesRead +} + +// Buffered returns a reader for data remaining in Decoder's buffer. +// Returned reader is valid until the next call to Decode or Skip. +func (dec *Decoder) Buffered() io.Reader { + return bytes.NewReader(dec.buf[dec.off:]) +} + +// readNext() reads next CBOR data item from Reader to buffer. +// It returns the size of next CBOR data item. +// It also returns validation error or read error if any. +func (dec *Decoder) readNext() (int, error) { + var readErr error + var validErr error + + for { + // Process any unread data in dec.buf. + if dec.off < len(dec.buf) { + dec.d.reset(dec.buf[dec.off:]) + off := dec.off // Save offset before data validation + validErr = dec.d.wellformed(true, false) + dec.off = off // Restore offset + + if validErr == nil { + return dec.d.off, nil + } + + if validErr != io.ErrUnexpectedEOF { + return 0, validErr + } + + // Process last read error on io.ErrUnexpectedEOF. 
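+			// If readErr is still nil, nothing has failed yet, so fall through and read more bytes below.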
+ if readErr != nil { + if readErr == io.EOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + return 0, readErr + } + } + + // More data is needed and there was no read error. + var n int + for n == 0 { + n, readErr = dec.read() + if n == 0 && readErr != nil { + // No more data can be read and read error is encountered. + // At this point, validErr is either nil or io.ErrUnexpectedEOF. + if readErr == io.EOF { + if validErr == io.ErrUnexpectedEOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + } + return 0, readErr + } + } + + // At this point, dec.buf contains new data from last read (n > 0). + } +} + +// read() reads data from Reader to buffer. +// It returns number of bytes read and any read error encountered. +// Postconditions: +// - dec.buf contains previously unread data and new data. +// - dec.off is 0. +func (dec *Decoder) read() (int, error) { + // Grow buf if needed. + const minRead = 512 + if cap(dec.buf)-len(dec.buf)+dec.off < minRead { + oldUnreadBuf := dec.buf[dec.off:] + dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead) + dec.overwriteBuf(oldUnreadBuf) + } + + // Copy unread data over read data and reset off to 0. + if dec.off > 0 { + dec.overwriteBuf(dec.buf[dec.off:]) + } + + // Read from reader and reslice buf. + n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)]) + dec.buf = dec.buf[0 : len(dec.buf)+n] + return n, err +} + +func (dec *Decoder) overwriteBuf(newBuf []byte) { + n := copy(dec.buf, newBuf) + dec.buf = dec.buf[:n] + dec.off = 0 +} + +// Encoder writes CBOR values to io.Writer. +type Encoder struct { + w io.Writer + em *encMode + indefTypes []cborType +} + +// NewEncoder returns a new encoder that writes to w using the default encoding options. +func NewEncoder(w io.Writer) *Encoder { + return defaultEncMode.NewEncoder(w) +} + +// Encode writes the CBOR encoding of v. +func (enc *Encoder) Encode(v interface{}) error { + if len(enc.indefTypes) > 0 && v != nil { + indefType := enc.indefTypes[len(enc.indefTypes)-1] + if indefType == cborTypeTextString { + k := reflect.TypeOf(v).Kind() + if k != reflect.String { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") + } + } else if indefType == cborTypeByteString { + t := reflect.TypeOf(v) + k := t.Kind() + if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string") + } + } + } + + buf := getEncodeBuffer() + + err := encode(buf, enc.em, reflect.ValueOf(v)) + if err == nil { + _, err = enc.w.Write(buf.Bytes()) + } + + putEncodeBuffer(buf) + return err +} + +// StartIndefiniteByteString starts byte string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteByteString() error { + return enc.startIndefinite(cborTypeByteString) +} + +// StartIndefiniteTextString starts text string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length text strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteTextString() error { + return enc.startIndefinite(cborTypeTextString) +} + +// StartIndefiniteArray starts array encoding of indefinite length. 
+// Subsequent calls of (*Encoder).Encode() encodes elements of the array +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteArray() error { + return enc.startIndefinite(cborTypeArray) +} + +// StartIndefiniteMap starts array encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes elements of the map +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteMap() error { + return enc.startIndefinite(cborTypeMap) +} + +// EndIndefinite closes last opened indefinite length value. +func (enc *Encoder) EndIndefinite() error { + if len(enc.indefTypes) == 0 { + return errors.New("cbor: cannot encode \"break\" code outside indefinite length values") + } + _, err := enc.w.Write([]byte{cborBreakFlag}) + if err == nil { + enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1] + } + return err +} + +var cborIndefHeader = map[cborType][]byte{ + cborTypeByteString: {cborByteStringWithIndefiniteLengthHead}, + cborTypeTextString: {cborTextStringWithIndefiniteLengthHead}, + cborTypeArray: {cborArrayWithIndefiniteLengthHead}, + cborTypeMap: {cborMapWithIndefiniteLengthHead}, +} + +func (enc *Encoder) startIndefinite(typ cborType) error { + if enc.em.indefLength == IndefLengthForbidden { + return &IndefiniteLengthError{typ} + } + _, err := enc.w.Write(cborIndefHeader[typ]) + if err == nil { + enc.indefTypes = append(enc.indefTypes, typ) + } + return err +} + +// RawMessage is a raw encoded CBOR value. +type RawMessage []byte + +// MarshalCBOR returns m or CBOR nil if m is nil. +func (m RawMessage) MarshalCBOR() ([]byte, error) { + if len(m) == 0 { + return cborNil, nil + } + return m, nil +} + +// UnmarshalCBOR creates a copy of data and saves to *m. +func (m *RawMessage) UnmarshalCBOR(data []byte) error { + if m == nil { + return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer") + } + *m = append((*m)[0:0], data...) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go new file mode 100644 index 0000000000..81228acf0f --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go @@ -0,0 +1,260 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "reflect" + "sort" + "strings" +) + +type field struct { + name string + nameAsInt int64 // used to decoder to match field name with CBOR int + cborName []byte + cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3 + idx []int + typ reflect.Type + ef encodeFunc + ief isEmptyFunc + typInfo *typeInfo // used to decoder to reuse type info + tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) + omitEmpty bool // used to skip empty field + keyAsInt bool // used to encode/decode field name as int +} + +type fields []*field + +// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth. 
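+// The resulting order is Go declaration order, with fields promoted from embedded structs placed
+// at the position of the embedding field.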
+type indexFieldSorter struct { + fields fields +} + +func (x *indexFieldSorter) Len() int { + return len(x.fields) +} + +func (x *indexFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *indexFieldSorter) Less(i, j int) bool { + iIdx, jIdx := x.fields[i].idx, x.fields[j].idx + for k := 0; k < len(iIdx) && k < len(jIdx); k++ { + if iIdx[k] != jIdx[k] { + return iIdx[k] < jIdx[k] + } + } + return len(iIdx) <= len(jIdx) +} + +// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. +type nameLevelAndTagFieldSorter struct { + fields fields +} + +func (x *nameLevelAndTagFieldSorter) Len() int { + return len(x.fields) +} + +func (x *nameLevelAndTagFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { + fi, fj := x.fields[i], x.fields[j] + if fi.name != fj.name { + return fi.name < fj.name + } + if len(fi.idx) != len(fj.idx) { + return len(fi.idx) < len(fj.idx) + } + if fi.tagged != fj.tagged { + return fi.tagged + } + return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters. +} + +// getFields returns visible fields of struct type t following visibility rules for JSON encoding. +func getFields(t reflect.Type) (flds fields, structOptions string) { + // Get special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + structOptions = tag + } + } + + // nTypes contains next level anonymous fields' types and indexes + // (there can be multiple fields of the same type at the same level) + flds, nTypes := appendFields(t, nil, nil, nil) + + if len(nTypes) > 0 { + + var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes + vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels + + for len(nTypes) > 0 { + cTypes, nTypes = nTypes, nil + + for t, idx := range cTypes { + // If there are multiple anonymous fields of the same struct type at the same level, all are ignored. + if len(idx) > 1 { + continue + } + + // Anonymous field of the same type at deeper nested level is ignored. + if vTypes[t] { + continue + } + vTypes[t] = true + + flds, nTypes = appendFields(t, idx[0], flds, nTypes) + } + } + } + + sort.Sort(&nameLevelAndTagFieldSorter{flds}) + + // Keep visible fields. + j := 0 // index of next unique field + for i := 0; i < len(flds); { + name := flds[i].name + if i == len(flds)-1 || // last field + name != flds[i+1].name || // field i has unique field name + len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 + (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not + flds[j] = flds[i] + j++ + } + + // Skip fields with the same field name. + for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + } + } + if j != len(flds) { + flds = flds[:j] + } + + // Sort fields by field index + sort.Sort(&indexFieldSorter{flds}) + + return flds, structOptions +} + +// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes . 
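+// For example (illustrative): a field tagged `cbor:"level,omitempty,keyasint"` is recorded under
+// the name "level" with both options set; an untagged exported field falls back to its `json` tag
+// and, failing that, to the Go field name.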
+func appendFields( + t reflect.Type, + idx []int, + flds fields, + nTypes map[reflect.Type][][]int, +) ( + _flds fields, + _nTypes map[reflect.Type][][]int, +) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + if !isFieldExportable(f, ft.Kind()) { + continue + } + + tag := f.Tag.Get("cbor") + if tag == "" { + tag = f.Tag.Get("json") + } + if tag == "-" { + continue + } + + tagged := tag != "" + + // Parse field tag options + var tagFieldName string + var omitempty, keyasint bool + for j := 0; tag != ""; j++ { + var token string + idx := strings.IndexByte(tag, ',') + if idx == -1 { + token, tag = tag, "" + } else { + token, tag = tag[:idx], tag[idx+1:] + } + if j == 0 { + tagFieldName = token + } else { + switch token { + case "omitempty": + omitempty = true + case "keyasint": + keyasint = true + } + } + } + + fieldName := tagFieldName + if tagFieldName == "" { + fieldName = f.Name + } + + fIdx := make([]int, len(idx)+1) + copy(fIdx, idx) + fIdx[len(fIdx)-1] = i + + if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" { + flds = append(flds, &field{ + name: fieldName, + idx: fIdx, + typ: f.Type, + omitEmpty: omitempty, + keyAsInt: keyasint, + tagged: tagged}) + } else { + if nTypes == nil { + nTypes = make(map[reflect.Type][][]int) + } + nTypes[ft] = append(nTypes[ft], fIdx) + } + } + + return flds, nTypes +} + +// isFieldExportable returns true if f is an exportable (regular or anonymous) field or +// a nonexportable anonymous field of struct type. +// Nonexportable anonymous field of struct type can contain exportable fields. +func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam + exportable := f.PkgPath == "" + return exportable || (f.Anonymous && fk == reflect.Struct) +} + +type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error) + +// getFieldValue returns field value of struct v by index. When encountering null pointer +// to anonymous (embedded) struct field, f is called with the last traversed field value. +func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) { + fv = v + for i, n := range idx { + fv = fv.Field(n) + + if i < len(idx)-1 { + if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct { + if fv.IsNil() { + // Null pointer to embedded struct field + fv, err = f(fv) + if err != nil || !fv.IsValid() { + return fv, err + } + } + fv = fv.Elem() + } + } + } + return fv, nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/tag.go b/vendor/github.com/fxamacker/cbor/v2/tag.go new file mode 100644 index 0000000000..5c4d2b7a42 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/tag.go @@ -0,0 +1,299 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and +// unmarshaling of tag content is subject to any encode and decode options that would apply to +// enclosed data item if it were to appear outside of a tag. +type Tag struct { + Number uint64 + Content interface{} +} + +// RawTag represents CBOR tag data, including tag number and raw tag content. +// RawTag implements Unmarshaler and Marshaler interfaces. +type RawTag struct { + Number uint64 + Content RawMessage +} + +// UnmarshalCBOR sets *t with tag number and raw tag content copied from data. 
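+// For example (illustrative): the input 0xd8 0x64 0x41 0x01 (tag 100 wrapping the byte string
+// 0x01) sets Number to 100 and Content to the raw bytes 0x41 0x01.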
+func (t *RawTag) UnmarshalCBOR(data []byte) error { + if t == nil { + return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and undefined to cbor.RawTag is no-op. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Unmarshal tag number. + typ, _, num := d.getHead() + if typ != cborTypeTag { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()} + } + t.Number = num + + // Unmarshal tag content. + c := d.data[d.off:] + t.Content = make([]byte, len(c)) + copy(t.Content, c) + return nil +} + +// MarshalCBOR returns CBOR encoding of t. +func (t RawTag) MarshalCBOR() ([]byte, error) { + if t.Number == 0 && len(t.Content) == 0 { + // Marshal uninitialized cbor.RawTag + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil + } + + e := getEncodeBuffer() + + encodeHead(e, byte(cborTypeTag), t.Number) + + content := t.Content + if len(content) == 0 { + content = cborNil + } + + buf := make([]byte, len(e.Bytes())+len(content)) + n := copy(buf, e.Bytes()) + copy(buf[n:], content) + + putEncodeBuffer(e) + return buf, nil +} + +// DecTagMode specifies how decoder handles tag number. +type DecTagMode int + +const ( + // DecTagIgnored makes decoder ignore tag number (skips if present). + DecTagIgnored DecTagMode = iota + + // DecTagOptional makes decoder verify tag number if it's present. + DecTagOptional + + // DecTagRequired makes decoder verify tag number and tag number must be present. + DecTagRequired + + maxDecTagMode +) + +func (dtm DecTagMode) valid() bool { + return dtm >= 0 && dtm < maxDecTagMode +} + +// EncTagMode specifies how encoder handles tag number. +type EncTagMode int + +const ( + // EncTagNone makes encoder not encode tag number. + EncTagNone EncTagMode = iota + + // EncTagRequired makes encoder encode tag number. + EncTagRequired + + maxEncTagMode +) + +func (etm EncTagMode) valid() bool { + return etm >= 0 && etm < maxEncTagMode +} + +// TagOptions specifies how encoder and decoder handle tag number. +type TagOptions struct { + DecTag DecTagMode + EncTag EncTagMode +} + +// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode +// to provide CBOR tag support. +type TagSet interface { + // Add adds given tag number(s), content type, and tag options to TagSet. + Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error + + // Remove removes given tag content type from TagSet. 
+ Remove(contentType reflect.Type) + + tagProvider +} + +type tagProvider interface { + getTagItemFromType(t reflect.Type) *tagItem + getTypeFromTagNum(num []uint64) reflect.Type +} + +type tagItem struct { + num []uint64 + cborTagNum []byte + contentType reflect.Type + opts TagOptions +} + +func (t *tagItem) equalTagNum(num []uint64) bool { + // Fast path to compare 1 tag number + if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] { + return true + } + + if len(t.num) != len(num) { + return false + } + + for i := 0; i < len(t.num); i++ { + if t.num[i] != num[i] { + return false + } + } + + return true +} + +type ( + tagSet map[reflect.Type]*tagItem + + syncTagSet struct { + sync.RWMutex + t tagSet + } +) + +func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem { + return t[typ] +} + +func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type { + for typ, tag := range t { + if tag.equalTagNum(num) { + return typ + } + } + return nil +} + +// NewTagSet returns TagSet (safe for concurrency). +func NewTagSet() TagSet { + return &syncTagSet{t: make(map[reflect.Type]*tagItem)} +} + +// Add adds given tag number(s), content type, and tag options to TagSet. +func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error { + if contentType == nil { + return errors.New("cbor: cannot add nil content type to TagSet") + } + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + tag, err := newTagItem(opts, contentType, num, nestedNum...) + if err != nil { + return err + } + t.Lock() + defer t.Unlock() + for typ, ti := range t.t { + if typ == contentType { + return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet") + } + if ti.equalTagNum(tag.num) { + return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num) + } + } + t.t[contentType] = tag + return nil +} + +// Remove removes given tag content type from TagSet. 
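+// For example (illustrative, with myType standing in for any named struct type registered earlier):
+//
+//	tags := NewTagSet()
+//	_ = tags.Add(
+//		TagOptions{EncTag: EncTagRequired, DecTag: DecTagRequired},
+//		reflect.TypeOf(myType{}), 1000,
+//	)
+//	tags.Remove(reflect.TypeOf(myType{}))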
+func (t *syncTagSet) Remove(contentType reflect.Type) { + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + t.Lock() + delete(t.t, contentType) + t.Unlock() +} + +func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem { + t.RLock() + ti := t.t[typ] + t.RUnlock() + return ti +} + +func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type { + t.RLock() + rt := t.t.getTypeFromTagNum(num) + t.RUnlock() + return rt +} + +func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) { + if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone { + return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet") + } + if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface { + return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String()) + } + if contentType == typeTime { + return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if contentType == typeBigInt { + return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically") + } + if contentType == typeTag { + return nil, errors.New("cbor: cannot add cbor.Tag to TagSet") + } + if contentType == typeRawTag { + return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet") + } + if num == 0 || num == 1 { + return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if num == 2 || num == 3 { + return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically") + } + if num == tagNumSelfDescribedCBOR { + return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically") + } + + te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType} + te.num = append(te.num, nestedNum...) + + // Cache encoded tag numbers + e := getEncodeBuffer() + for _, n := range te.num { + encodeHead(e, byte(cborTypeTag), n) + } + te.cborTagNum = make([]byte, e.Len()) + copy(te.cborTagNum, e.Bytes()) + putEncodeBuffer(e) + + return &te, nil +} + +var ( + typeTag = reflect.TypeOf(Tag{}) + typeRawTag = reflect.TypeOf(RawTag{}) +) + +// WrongTagError describes mismatch between CBOR tag and registered tag. +type WrongTagError struct { + RegisteredType reflect.Type + RegisteredTagNum []uint64 + TagNum []uint64 +} + +func (e *WrongTagError) Error() string { + return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum) +} diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go new file mode 100644 index 0000000000..b40793b95e --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -0,0 +1,394 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding/binary" + "errors" + "io" + "math" + "strconv" + + "github.com/x448/float16" +) + +// SyntaxError is a description of a CBOR syntax error. +type SyntaxError struct { + msg string +} + +func (e *SyntaxError) Error() string { return e.msg } + +// SemanticError is a description of a CBOR semantic error. 
+type SemanticError struct { + msg string +} + +func (e *SemanticError) Error() string { return e.msg } + +// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags. +type MaxNestedLevelError struct { + maxNestedLevels int +} + +func (e *MaxNestedLevelError) Error() string { + return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels) +} + +// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays. +type MaxArrayElementsError struct { + maxArrayElements int +} + +func (e *MaxArrayElementsError) Error() string { + return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array" +} + +// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps. +type MaxMapPairsError struct { + maxMapPairs int +} + +func (e *MaxMapPairsError) Error() string { + return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" +} + +// IndefiniteLengthError indicates found disallowed indefinite length items. +type IndefiniteLengthError struct { + t cborType +} + +func (e *IndefiniteLengthError) Error() string { + return "cbor: indefinite-length " + e.t.String() + " isn't allowed" +} + +// TagsMdError indicates found disallowed CBOR tags. +type TagsMdError struct { +} + +func (e *TagsMdError) Error() string { + return "cbor: CBOR tag isn't allowed" +} + +// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item. +type ExtraneousDataError struct { + numOfBytes int // number of bytes of extraneous data + index int // location of extraneous data +} + +func (e *ExtraneousDataError) Error() string { + return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index) +} + +// wellformed checks whether the CBOR data item is well-formed. +// allowExtraData indicates if extraneous data is allowed after the CBOR data item. +// - use allowExtraData = true when using Decoder.Decode() +// - use allowExtraData = false when using Unmarshal() +func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error { + if len(d.data) == d.off { + return io.EOF + } + _, err := d.wellformedInternal(0, checkBuiltinTags) + if err == nil { + if !allowExtraData && d.off != len(d.data) { + err = &ExtraneousDataError{len(d.data) - d.off, d.off} + } + } + return err +} + +// wellformedInternal checks data's well-formedness and returns max depth and error. 
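+// depth counts the nesting of arrays, maps, and tags seen so far and is checked against the
+// decode mode's maximum nested level as the data is scanned.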
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo + t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag() + if err != nil { + return 0, err + } + + switch t { + case cborTypeByteString, cborTypeTextString: + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) + } + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") + } + if len(d.data)-d.off < valInt { // valInt+off may overflow integer + return 0, io.ErrUnexpectedEOF + } + d.off += valInt + + case cborTypeArray, cborTypeMap: + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) + } + + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") + } + + if t == cborTypeArray { + if valInt > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if valInt > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + + count := 1 + if t == cborTypeMap { + count = 2 + } + maxDepth := depth + for j := 0; j < count; j++ { + for i := 0; i < valInt; i++ { + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt // Save max depth + } + } + } + depth = maxDepth + + case cborTypeTag: + if d.dm.tagsMd == TagsForbidden { + return 0, &TagsMdError{} + } + + tagNum := val + + // Scan nested tag numbers to avoid recursion. + for { + if len(d.data) == d.off { // Tag number must be followed by tag content. + return 0, io.ErrUnexpectedEOF + } + if checkBuiltinTags { + err = validBuiltinTag(tagNum, d.data[d.off]) + if err != nil { + return 0, err + } + } + if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) { + return 0, &UnacceptableDataItemError{ + CBORType: cborTypeTag.String(), + Message: "bignum", + } + } + if getType(d.data[d.off]) != cborTypeTag { + break + } + if _, _, tagNum, err = d.wellformedHead(); err != nil { + return 0, err + } + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + } + // Check tag content. + return d.wellformedInternal(depth, checkBuiltinTags) + } + + return depth, nil +} + +// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + // Peek ahead to get next type and indefinite length status. 
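+		// Per RFC 8949, each chunk of an indefinite-length string must itself be a definite-length
+		// string of the same major type; anything else is rejected below.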
+ nt, ai := parseInitialByte(d.data[d.off]) + if t != nt { + return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} + } + if additionalInformation(ai).isIndefiniteLength() { + return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"} + } + if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + } + return depth, nil +} + +// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + maxDepth := depth + i := 0 + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt + } + i++ + if t == cborTypeArray { + if i > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if i%2 == 0 && i/2 > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + } + if t == cborTypeMap && i%2 == 1 { + return 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return maxDepth, nil +} + +func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, + err error, +) { + t, ai, val, err = d.wellformedHead() + if err != nil { + return + } + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) { + dataLen := len(d.data) - d.off + if dataLen == 0 { + return 0, 0, 0, io.ErrUnexpectedEOF + } + + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + dataLen-- + + if ai <= maxAdditionalInformationWithoutArgument { + return t, ai, val, nil + } + + if ai == additionalInformationWith1ByteArgument { + const argumentSize = 1 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(d.data[d.off]) + d.off++ + if t == cborTypePrimitives && val < 32 { + return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()} + } + return t, ai, val, nil + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + if t == cborTypePrimitives { + if err 
:= d.acceptableFloat(math.Float64frombits(val)); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if additionalInformation(ai).isIndefiniteLength() { + switch t { + case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag: + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} + case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite(). + return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return t, ai, val, nil + } + + // ai == 28, 29, 30 + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} +} + +func (d *decoder) acceptableFloat(f float64) error { + switch { + case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point NaN", + } + case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point infinity", + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index d975773d49..d970c7cf44 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -264,7 +264,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + return errors.New("only structs, pointers, maps and slices are supported for setting values") } if nameProvider == nil { diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 0000000000..e7f28ed6b7 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. 
+ +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go index 03555184d1..20a359bb60 100644 --- a/vendor/github.com/go-openapi/swag/initialism_index.go +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -16,9 +16,130 @@ package swag import ( "sort" + "strings" "sync" ) +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. + commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc 
int + for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + // indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. // Since go1.9, this may be implemented with sync.Map. type indexOfInitialisms struct { @@ -55,7 +176,7 @@ func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { func (m *indexOfInitialisms) sorted() (result []string) { m.sortMutex.Lock() defer m.sortMutex.Unlock() - m.index.Range(func(key, value interface{}) bool { + m.index.Range(func(key, _ interface{}) bool { k := key.(string) result = append(result, k) return true @@ -63,3 +184,19 @@ func (m *indexOfInitialisms) sorted() (result []string) { sort.Sort(sort.Reverse(byInitialism(result))) return } + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9bb8..8bb64ac32f 100644 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string + kind lexemKind } +) - casualNameLexem struct { - original string - } +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } 
-func (l *casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go index a1825fb7dc..274727a866 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + postSplitInitialismCheck bool + } + + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch +) + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. + + matchesPool struct { + *sync.Pool } - splitterOption func(*splitter) *splitter + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } ) -// split calls the splitter; splitter provides more control and post options +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. 
+// +// Use newSplitter for more control and options func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) - for _, lexem := range lexems { + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter + return s } // withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { +func withPostSplitInitialismCheck(s *splitter) { s.postSplitInitialismCheck = true +} + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) } - initialismMatches []*initialismMatch -) -func (s *splitter) toNameLexems(name string) []nameLexem { + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). 
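// The pools declared above all follow the same borrow/reset/redeem idiom:
// take a pointer to a slice (or a bytes.Buffer) out of a sync.Pool, truncate
// it to zero length while keeping its capacity, use it, then put it back.
// A minimal, generic sketch of that idiom (illustrative only; the pools in
// this file are specialized per element type):
package main

import (
	"fmt"
	"sync"
)

type intSlicePool struct{ *sync.Pool }

func newIntSlicePool(capacity int) intSlicePool {
	return intSlicePool{Pool: &sync.Pool{
		New: func() any { s := make([]int, 0, capacity); return &s },
	}}
}

// Borrow hands out a zero-length slice, reusing any previously grown capacity.
func (p intSlicePool) Borrow() *[]int {
	s := p.Get().(*[]int)
	*s = (*s)[:0]
	return s
}

// Redeem returns the slice to the pool for a later Borrow.
func (p intSlicePool) Redeem(s *[]int) { p.Put(s) }

func main() {
	pool := newIntSlicePool(8)
	s := pool.Borrow()
	*s = append(*s, 1, 2, 3)
	fmt.Println(*s) // [1 2 3]
	pool.Redeem(s)
}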
+ newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue + } - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } } + + match.complete = true + match.end = currentRunePosition } - match.complete = true - match.end = currentRunePosition + *newMatches = append(*newMatches, match) } - - newMatches = append(newMatches, match) } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) 
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) - } - - if lastAcceptedMatch.end+1 != len(nameRunes) { + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) + s.appendBrokenDownCasualString(nameLexems, rest) } - return nameLexems -} + poolOfMatches.RedeemMatches(matches) -func (s *splitter) initialismRuneEqual(a, b rune) bool { - return a == b + return nameLexems } -func (s *splitter) breakInitialism(original string) nameLexem { +func (s splitter) breakInitialism(original string) nameLexem { return newInitialismNameLexem(original, original) } -func (s *splitter) breakCasualString(str []rune) []nameLexem { - segments := make([]nameLexem, 0) - currentSegment := "" +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() addCasualNameLexem := func(original string) { - segments = append(segments, newCasualNameLexem(original)) + *segments = append(*segments, newCasualNameLexem(original)) } addInitialismNameLexem := func(original, match string) { - segments = append(segments, newInitialismNameLexem(original, match)) + *segments = append(*segments, newInitialismNameLexem(original, match)) } - addNameLexem := func(original string) { - if s.postSplitInitialismCheck { - for _, initialism := range s.initialisms { - if upper(initialism) == upper(original) { - addInitialismNameLexem(original, initialism) + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + return } } - } - addCasualNameLexem(original) + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem } - for _, rn := range string(str) { - if replace, found := nameReplaceTable[rn]; found { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + for _, rn := range str { + if replace, found := nameReplaceTable(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } if replace != "" { @@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem { } if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if currentSegment != "" { - 
addNameLexem(currentSegment) - currentSegment = "" + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } continue } if unicode.IsUpper(rn) { - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - currentSegment = "" + currentSegment.Reset() } - currentSegment += string(rn) + currentSegment.WriteRune(rn) + } + + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. +func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } - if currentSegment != "" { - addNameLexem(currentSegment) + i += size } - return segments + return true } diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 0000000000..90745d5ca9 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,8 @@ +package swag + +import "unsafe" + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. +func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 0413f7447c..5051401c49 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -18,76 +18,25 @@ import ( "reflect" "strings" "unicode" + "unicode/utf8" ) -// commonInitialisms are common acronyms that are kept as whole uppercased words. -var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var isInitialism func(string) bool - // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // +// The prefix function is assumed to return a string that starts with an upper case letter. +// // e.g. 
to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in } - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - - // a test function - isInitialism = commonInitialisms.isInitialism + return GoNamePrefixFunc(name) + in } const ( @@ -156,22 +105,6 @@ func SplitByFormat(data, format string) []string { return result } -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - // Removes leading whitespaces func trim(str string) string { return strings.TrimSpace(str) @@ -188,15 +121,20 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) (camelized string) { +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + for pos, ru := range []rune(word) { if pos > 0 { - camelized += string(unicode.ToLower(ru)) + camelized.WriteRune(unicode.ToLower(ru)) } else { - camelized += string(unicode.ToUpper(ru)) + camelized.WriteRune(unicode.ToUpper(ru)) } } - return + return camelized.String() } // ToFileName lowercases and underscores a go type name @@ -224,26 +162,31 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(in)) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) - for _, w := range in { + for _, w := range *in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { out = append(out, trim(w.GetOriginal())) } } + poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(in)) - for _, w := range in { + out := make([]string, 0, len(*in)) + for _, w := range *in { original := 
trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) @@ -251,6 +194,8 @@ func ToHumanNameTitle(name string) string { out = append(out, original) } } + poolOfLexems.RedeemLexems(in) + return strings.Join(out, " ") } @@ -283,35 +228,70 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - lexems := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } - result := "" - for _, lexem := range lexems { + for _, lexem := range lexemes[1:] { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result += goName + result.WriteString(goName) } - if len(result) > 0 { - // Only prefix with X when the first character isn't an ascii letter - first := []rune(result)[0] - if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { - if GoNamePrefixFunc == nil { - return "X" + result - } - result = GoNamePrefixFunc(name) + result - } - first = []rune(result)[0] - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) - } - } - - return result + return result.String() } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -376,16 +356,6 @@ func IsZero(data interface{}) bool { } } -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() -} - // CommandLineOptionsGroup represents a group of user-defined command line options type CommandLineOptionsGroup struct { ShortDescription string diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index a8c4e359ea..f59e025932 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -16,6 +16,7 @@ package swag import ( "encoding/json" + "errors" "fmt" "path/filepath" "reflect" @@ -50,7 +51,7 @@ func 
BytesToYAMLDoc(data []byte) (interface{}, error) { return nil, err } if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, fmt.Errorf("only YAML documents that are objects are supported") + return nil, errors.New("only YAML documents that are objects are supported") } return &document, nil } diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel index 0905f63539..6e2fc073d4 100644 --- a/vendor/github.com/google/cel-go/cel/BUILD.bazel +++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel @@ -10,9 +10,12 @@ go_library( "cel.go", "decls.go", "env.go", + "folding.go", "io.go", + "inlining.go", "library.go", "macro.go", + "optimizer.go", "options.go", "program.go", "validator.go", @@ -56,7 +59,11 @@ go_test( "cel_test.go", "decls_test.go", "env_test.go", + "folding_test.go", "io_test.go", + "inlining_test.go", + "optimizer_test.go", + "validator_test.go", ], data = [ "//cel/testdata:gen_test_fds", @@ -70,6 +77,7 @@ go_test( "//common/types:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", + "//ext:go_default_library", "//test:go_default_library", "//test/proto2pb:go_default_library", "//test/proto3pb:go_default_library", diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go index 0f9501341b..b59e3708de 100644 --- a/vendor/github.com/google/cel-go/cel/decls.go +++ b/vendor/github.com/google/cel-go/cel/decls.go @@ -353,43 +353,3 @@ func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) { return nil, fmt.Errorf("unsupported decl: %v", d) } } - -func typeValueToKind(tv ref.Type) (Kind, error) { - switch tv { - case types.BoolType: - return BoolKind, nil - case types.DoubleType: - return DoubleKind, nil - case types.IntType: - return IntKind, nil - case types.UintType: - return UintKind, nil - case types.ListType: - return ListKind, nil - case types.MapType: - return MapKind, nil - case types.StringType: - return StringKind, nil - case types.BytesType: - return BytesKind, nil - case types.DurationType: - return DurationKind, nil - case types.TimestampType: - return TimestampKind, nil - case types.NullType: - return NullTypeKind, nil - case types.TypeType: - return TypeKind, nil - default: - switch tv.TypeName() { - case "dyn": - return DynKind, nil - case "google.protobuf.Any": - return AnyKind, nil - case "optional": - return OpaqueKind, nil - default: - return 0, fmt.Errorf("no known conversion for type of %s", tv.TypeName()) - } - } -} diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go index b5c3b4cc55..a8650c4e9f 100644 --- a/vendor/github.com/google/cel-go/cel/env.go +++ b/vendor/github.com/google/cel-go/cel/env.go @@ -38,26 +38,42 @@ type Source = common.Source // Ast representing the checked or unchecked expression, its source, and related metadata such as // source position information. type Ast struct { - expr *exprpb.Expr - info *exprpb.SourceInfo - source Source - refMap map[int64]*celast.ReferenceInfo - typeMap map[int64]*types.Type + source Source + impl *celast.AST +} + +// NativeRep converts the AST to a Go-native representation. +func (ast *Ast) NativeRep() *celast.AST { + if ast == nil { + return nil + } + return ast.impl } // Expr returns the proto serializable instance of the parsed/checked expression. 
+// +// Deprecated: prefer cel.AstToCheckedExpr() or cel.AstToParsedExpr() and call GetExpr() +// the result instead. func (ast *Ast) Expr() *exprpb.Expr { - return ast.expr + if ast == nil { + return nil + } + pbExpr, _ := celast.ExprToProto(ast.NativeRep().Expr()) + return pbExpr } // IsChecked returns whether the Ast value has been successfully type-checked. func (ast *Ast) IsChecked() bool { - return ast.typeMap != nil && len(ast.typeMap) > 0 + return ast.NativeRep().IsChecked() } // SourceInfo returns character offset and newline position information about expression elements. func (ast *Ast) SourceInfo() *exprpb.SourceInfo { - return ast.info + if ast == nil { + return nil + } + pbInfo, _ := celast.SourceInfoToProto(ast.NativeRep().SourceInfo()) + return pbInfo } // ResultType returns the output type of the expression if the Ast has been type-checked, else @@ -65,9 +81,6 @@ func (ast *Ast) SourceInfo() *exprpb.SourceInfo { // // Deprecated: use OutputType func (ast *Ast) ResultType() *exprpb.Type { - if !ast.IsChecked() { - return chkdecls.Dyn - } out := ast.OutputType() t, err := TypeToExprType(out) if err != nil { @@ -79,16 +92,18 @@ func (ast *Ast) ResultType() *exprpb.Type { // OutputType returns the output type of the expression if the Ast has been type-checked, else // returns cel.DynType as the parse step cannot infer types. func (ast *Ast) OutputType() *Type { - t, found := ast.typeMap[ast.expr.GetId()] - if !found { - return DynType + if ast == nil { + return types.ErrorType } - return t + return ast.NativeRep().GetType(ast.NativeRep().Expr().ID()) } // Source returns a view of the input used to create the Ast. This source may be complete or // constructed from the SourceInfo. func (ast *Ast) Source() Source { + if ast == nil { + return nil + } return ast.source } @@ -198,29 +213,28 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) { // It is possible to have both non-nil Ast and Issues values returned from this call: however, // the mere presence of an Ast does not imply that it is valid for use. func (e *Env) Check(ast *Ast) (*Ast, *Issues) { - // Note, errors aren't currently possible on the Ast to ParsedExpr conversion. - pe, _ := AstToParsedExpr(ast) - // Construct the internal checker env, erroring if there is an issue adding the declarations. chk, err := e.initChecker() if err != nil { errs := common.NewErrors(ast.Source()) errs.ReportError(common.NoLocation, err.Error()) - return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo()) + return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) } - res, errs := checker.Check(pe, ast.Source(), chk) + checked, errs := checker.Check(ast.NativeRep(), ast.Source(), chk) if len(errs.GetErrors()) > 0 { - return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo()) + return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) } // Manually create the Ast to ensure that the Ast source information (which may be more // detailed than the information provided by Check), is returned to the caller. ast = &Ast{ - source: ast.Source(), - expr: res.Expr, - info: res.SourceInfo, - refMap: res.ReferenceMap, - typeMap: res.TypeMap} + source: ast.Source(), + impl: checked} + + // Avoid creating a validator config if it's not needed. + if len(e.validators) == 0 { + return ast, nil + } // Generate a validator configuration from the set of configured validators. 
vConfig := newValidatorConfig() @@ -230,9 +244,9 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) { } } // Apply additional validators on the type-checked result. - iss := NewIssuesWithSourceInfo(errs, ast.SourceInfo()) + iss := NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) for _, v := range e.validators { - v.Validate(e, vConfig, res, iss) + v.Validate(e, vConfig, checked, iss) } if iss.Err() != nil { return nil, iss @@ -295,17 +309,13 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { copy(chkOptsCopy, e.chkOpts) // Copy the declarations if needed. - varsCopy := []*decls.VariableDecl{} if chk != nil { // If the type-checker has already been instantiated, then the e.declarations have been // validated within the chk instance. chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk)) - } else { - // If the type-checker has not been instantiated, ensure the unvalidated declarations are - // provided to the extended Env instance. - varsCopy = make([]*decls.VariableDecl, len(e.variables)) - copy(varsCopy, e.variables) } + varsCopy := make([]*decls.VariableDecl, len(e.variables)) + copy(varsCopy, e.variables) // Copy macros and program options macsCopy := make([]parser.Macro, len(e.macros)) @@ -402,6 +412,17 @@ func (e *Env) Libraries() []string { return libraries } +// HasFunction returns whether a specific function has been configured in the environment +func (e *Env) HasFunction(functionName string) bool { + _, ok := e.functions[functionName] + return ok +} + +// Functions returns map of Functions, keyed by function name, that have been configured in the environment. +func (e *Env) Functions() map[string]*decls.FunctionDecl { + return e.functions +} + // HasValidator returns whether a specific ASTValidator has been configured in the environment. func (e *Env) HasValidator(name string) bool { for _, v := range e.validators { @@ -429,16 +450,11 @@ func (e *Env) Parse(txt string) (*Ast, *Issues) { // It is possible to have both non-nil Ast and Issues values returned from this call; however, // the mere presence of an Ast does not imply that it is valid for use. func (e *Env) ParseSource(src Source) (*Ast, *Issues) { - res, errs := e.prsr.Parse(src) + parsed, errs := e.prsr.Parse(src) if len(errs.GetErrors()) > 0 { return nil, &Issues{errs: errs} } - // Manually create the Ast to ensure that the text source information is propagated on - // subsequent calls to Check. - return &Ast{ - source: src, - expr: res.GetExpr(), - info: res.GetSourceInfo()}, nil + return &Ast{source: src, impl: parsed}, nil } // Program generates an evaluable instance of the Ast within the environment (Env). @@ -534,8 +550,9 @@ func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) { // TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an // Ast format and then Program again. 
func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) { - pruned := interpreter.PruneAst(a.Expr(), a.SourceInfo().GetMacroCalls(), details.State()) - expr, err := AstToString(ParsedExprToAst(pruned)) + pruned := interpreter.PruneAst(a.impl.Expr(), a.impl.SourceInfo().MacroCalls(), details.State()) + newAST := &Ast{source: a.Source(), impl: pruned} + expr, err := AstToString(newAST) if err != nil { return nil, err } @@ -556,16 +573,10 @@ func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) { // EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and // extension functions provided by estimator. func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) { - checked := &celast.CheckedAST{ - Expr: ast.Expr(), - SourceInfo: ast.SourceInfo(), - TypeMap: ast.typeMap, - ReferenceMap: ast.refMap, - } extendedOpts := make([]checker.CostOption, 0, len(e.costOptions)) extendedOpts = append(extendedOpts, opts...) extendedOpts = append(extendedOpts, e.costOptions...) - return checker.Cost(checked, estimator, extendedOpts...) + return checker.Cost(ast.impl, estimator, extendedOpts...) } // configure applies a series of EnvOptions to the current environment. @@ -707,7 +718,7 @@ type Error = common.Error // Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct. type Issues struct { errs *common.Errors - info *exprpb.SourceInfo + info *celast.SourceInfo } // NewIssues returns an Issues struct from a common.Errors object. @@ -718,7 +729,7 @@ func NewIssues(errs *common.Errors) *Issues { // NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metatata // which can be used with the `ReportErrorAtID` method for additional error reports within the context // information that's inferred from an expression id. -func NewIssuesWithSourceInfo(errs *common.Errors, info *exprpb.SourceInfo) *Issues { +func NewIssuesWithSourceInfo(errs *common.Errors, info *celast.SourceInfo) *Issues { return &Issues{ errs: errs, info: info, @@ -749,10 +760,10 @@ func (i *Issues) Append(other *Issues) *Issues { if i == nil { return other } - if other == nil { + if other == nil || i == other { return i } - return NewIssues(i.errs.Append(other.errs.GetErrors())) + return NewIssuesWithSourceInfo(i.errs.Append(other.errs.GetErrors()), i.info) } // String converts the issues to a suitable display string. @@ -768,30 +779,7 @@ func (i *Issues) String() string { // The source metadata for the expression at `id`, if present, is attached to the error report. // To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo. func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) { - i.errs.ReportErrorAtID(id, locationByID(id, i.info), message, args...) -} - -// locationByID returns a common.Location given an expression id. -// -// TODO: move this functionality into the native SourceInfo and an overhaul of the common.Source -// as this implementation relies on the abstractions present in the protobuf SourceInfo object, -// and is replicated in the checker. 
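// Hedged usage sketch for the Env surface touched in this hunk: compile
// (parse + check), program evaluation, and the newly added HasFunction
// helper. HasFunction and Program are declared in env.go above; NewEnv,
// Variable, StringType and Compile are assumed from the package's public
// API. Error handling is abbreviated.
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
	if err != nil {
		panic(err)
	}

	// HasFunction reports whether a function (here, the standard `size`)
	// has been configured in the environment.
	fmt.Println(env.HasFunction("size"))

	ast, iss := env.Compile(`"Hello, " + name`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"name": "CEL"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Value()) // Hello, CEL
}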
-func locationByID(id int64, sourceInfo *exprpb.SourceInfo) common.Location { - positions := sourceInfo.GetPositions() - var line = 1 - if offset, found := positions[id]; found { - col := int(offset) - for _, lineOffset := range sourceInfo.GetLineOffsets() { - if lineOffset < offset { - line++ - col = int(offset - lineOffset) - } else { - break - } - } - return common.NewLocation(line, col) - } - return common.NoLocation + i.errs.ReportErrorAtID(id, i.info.GetStartLocation(id), message, args...) } // getStdEnv lazy initializes the CEL standard environment. @@ -809,7 +797,7 @@ type interopCELTypeProvider struct { // FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists. // -// This method proxies to the underyling ref.TypeProvider's FindType method and converts protobuf type +// This method proxies to the underlying ref.TypeProvider's FindType method and converts protobuf type // into a native type representation. If the conversion fails, the type is listed as not found. func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) { if et, found := p.FindType(typeName); found { @@ -822,10 +810,17 @@ func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, b return nil, false } +// FindStructFieldNames returns an empty set of field for the interop provider. +// +// To inspect the field names, migrate to a `types.Provider` implementation. +func (p *interopCELTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) { + return []string{}, false +} + // FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field // name, if one exists. // -// This method proxies to the underyling ref.TypeProvider's FindFieldType method and converts protobuf type +// This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts protobuf type // into a native type representation. If the conversion fails, the type is listed as not found. func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) { if ft, found := p.FindFieldType(structType, fieldName); found { diff --git a/vendor/github.com/google/cel-go/cel/folding.go b/vendor/github.com/google/cel-go/cel/folding.go new file mode 100644 index 0000000000..d7060896d3 --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/folding.go @@ -0,0 +1,559 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "fmt" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +// ConstantFoldingOption defines a functional option for configuring constant folding. 
+type ConstantFoldingOption func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) + +// MaxConstantFoldIterations limits the number of times literals may be folding during optimization. +// +// Defaults to 100 if not set. +func MaxConstantFoldIterations(limit int) ConstantFoldingOption { + return func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) { + opt.maxFoldIterations = limit + return opt, nil + } +} + +// NewConstantFoldingOptimizer creates an optimizer which inlines constant scalar an aggregate +// literal values within function calls and select statements with their evaluated result. +func NewConstantFoldingOptimizer(opts ...ConstantFoldingOption) (ASTOptimizer, error) { + folder := &constantFoldingOptimizer{ + maxFoldIterations: defaultMaxConstantFoldIterations, + } + var err error + for _, o := range opts { + folder, err = o(folder) + if err != nil { + return nil, err + } + } + return folder, nil +} + +type constantFoldingOptimizer struct { + maxFoldIterations int +} + +// Optimize queries the expression graph for scalar and aggregate literal expressions within call and +// select statements and then evaluates them and replaces the call site with the literal result. +// +// Note: only values which can be represented as literals in CEL syntax are supported. +func (opt *constantFoldingOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST { + root := ast.NavigateAST(a) + + // Walk the list of foldable expression and continue to fold until there are no more folds left. + // All of the fold candidates returned by the constantExprMatcher should succeed unless there's + // a logic bug with the selection of expressions. + foldableExprs := ast.MatchDescendants(root, constantExprMatcher) + foldCount := 0 + for len(foldableExprs) != 0 && foldCount < opt.maxFoldIterations { + for _, fold := range foldableExprs { + // If the expression could be folded because it's a non-strict call, and the + // branches are pruned, continue to the next fold. + if fold.Kind() == ast.CallKind && maybePruneBranches(ctx, fold) { + continue + } + // Otherwise, assume all context is needed to evaluate the expression. + err := tryFold(ctx, a, fold) + if err != nil { + ctx.ReportErrorAtID(fold.ID(), "constant-folding evaluation failed: %v", err.Error()) + return a + } + } + foldCount++ + foldableExprs = ast.MatchDescendants(root, constantExprMatcher) + } + // Once all of the constants have been folded, try to run through the remaining comprehensions + // one last time. In this case, there's no guarantee they'll run, so we only update the + // target comprehension node with the literal value if the evaluation succeeds. + for _, compre := range ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) { + tryFold(ctx, a, compre) + } + + // If the output is a list, map, or struct which contains optional entries, then prune it + // to make sure that the optionals, if resolved, do not surface in the output literal. + pruneOptionalElements(ctx, root) + + // Ensure that all intermediate values in the folded expression can be represented as valid + // CEL literals within the AST structure. Use `PostOrderVisit` rather than `MatchDescendents` + // to avoid extra allocations during this final pass through the AST. 
+ ast.PostOrderVisit(root, ast.NewExprVisitor(func(e ast.Expr) { + if e.Kind() != ast.LiteralKind { + return + } + val := e.AsLiteral() + adapted, err := adaptLiteral(ctx, val) + if err != nil { + ctx.ReportErrorAtID(root.ID(), "constant-folding evaluation failed: %v", err.Error()) + return + } + ctx.UpdateExpr(e, adapted) + })) + + return a +} + +// tryFold attempts to evaluate a sub-expression to a literal. +// +// If the evaluation succeeds, the input expr value will be modified to become a literal, otherwise +// the method will return an error. +func tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error { + // Assume all context is needed to evaluate the expression. + subAST := &Ast{ + impl: ast.NewCheckedAST(ast.NewAST(expr, a.SourceInfo()), a.TypeMap(), a.ReferenceMap()), + } + prg, err := ctx.Program(subAST) + if err != nil { + return err + } + out, _, err := prg.Eval(NoVars()) + if err != nil { + return err + } + // Update the fold expression to be a literal. + ctx.UpdateExpr(expr, ctx.NewLiteral(out)) + return nil +} + +// maybePruneBranches inspects the non-strict call expression to determine whether +// a branch can be removed. Evaluation will naturally prune logical and / or calls, +// but conditional will not be pruned cleanly, so this is one small area where the +// constant folding step reimplements a portion of the evaluator. +func maybePruneBranches(ctx *OptimizerContext, expr ast.NavigableExpr) bool { + call := expr.AsCall() + args := call.Args() + switch call.FunctionName() { + case operators.LogicalAnd, operators.LogicalOr: + return maybeShortcircuitLogic(ctx, call.FunctionName(), args, expr) + case operators.Conditional: + cond := args[0] + truthy := args[1] + falsy := args[2] + if cond.Kind() != ast.LiteralKind { + return false + } + if cond.AsLiteral() == types.True { + ctx.UpdateExpr(expr, truthy) + } else { + ctx.UpdateExpr(expr, falsy) + } + return true + case operators.In: + haystack := args[1] + if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 { + ctx.UpdateExpr(expr, ctx.NewLiteral(types.False)) + return true + } + needle := args[0] + if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind { + needleValue := needle.AsLiteral() + list := haystack.AsList() + for _, e := range list.Elements() { + if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True { + ctx.UpdateExpr(expr, ctx.NewLiteral(types.True)) + return true + } + } + } + } + return false +} + +func maybeShortcircuitLogic(ctx *OptimizerContext, function string, args []ast.Expr, expr ast.NavigableExpr) bool { + shortcircuit := types.False + skip := types.True + if function == operators.LogicalOr { + shortcircuit = types.True + skip = types.False + } + newArgs := []ast.Expr{} + for _, arg := range args { + if arg.Kind() != ast.LiteralKind { + newArgs = append(newArgs, arg) + continue + } + if arg.AsLiteral() == skip { + continue + } + if arg.AsLiteral() == shortcircuit { + ctx.UpdateExpr(expr, arg) + return true + } + } + if len(newArgs) == 0 { + newArgs = append(newArgs, args[0]) + ctx.UpdateExpr(expr, newArgs[0]) + return true + } + if len(newArgs) == 1 { + ctx.UpdateExpr(expr, newArgs[0]) + return true + } + ctx.UpdateExpr(expr, ctx.NewCall(function, newArgs...)) + return true +} + +// pruneOptionalElements works from the bottom up to resolve optional elements within +// aggregate literals. 
+// +// Note, many aggregate literals will be resolved as arguments to functions or select +// statements, so this method exists to handle the case where the literal could not be +// fully resolved or exists outside of a call, select, or comprehension context. +func pruneOptionalElements(ctx *OptimizerContext, root ast.NavigableExpr) { + aggregateLiterals := ast.MatchDescendants(root, aggregateLiteralMatcher) + for _, lit := range aggregateLiterals { + switch lit.Kind() { + case ast.ListKind: + pruneOptionalListElements(ctx, lit) + case ast.MapKind: + pruneOptionalMapEntries(ctx, lit) + case ast.StructKind: + pruneOptionalStructFields(ctx, lit) + } + } +} + +func pruneOptionalListElements(ctx *OptimizerContext, e ast.Expr) { + l := e.AsList() + elems := l.Elements() + optIndices := l.OptionalIndices() + if len(optIndices) == 0 { + return + } + updatedElems := []ast.Expr{} + updatedIndices := []int32{} + newOptIndex := -1 + for _, e := range elems { + newOptIndex++ + if !l.IsOptional(int32(newOptIndex)) { + updatedElems = append(updatedElems, e) + continue + } + if e.Kind() != ast.LiteralKind { + updatedElems = append(updatedElems, e) + updatedIndices = append(updatedIndices, int32(newOptIndex)) + continue + } + optElemVal, ok := e.AsLiteral().(*types.Optional) + if !ok { + updatedElems = append(updatedElems, e) + updatedIndices = append(updatedIndices, int32(newOptIndex)) + continue + } + if !optElemVal.HasValue() { + newOptIndex-- // Skipping causes the list to get smaller. + continue + } + ctx.UpdateExpr(e, ctx.NewLiteral(optElemVal.GetValue())) + updatedElems = append(updatedElems, e) + } + ctx.UpdateExpr(e, ctx.NewList(updatedElems, updatedIndices)) +} + +func pruneOptionalMapEntries(ctx *OptimizerContext, e ast.Expr) { + m := e.AsMap() + entries := m.Entries() + updatedEntries := []ast.EntryExpr{} + modified := false + for _, e := range entries { + entry := e.AsMapEntry() + key := entry.Key() + val := entry.Value() + // If the entry is not optional, or the value-side of the optional hasn't + // been resolved to a literal, then preserve the entry as-is. + if !entry.IsOptional() || val.Kind() != ast.LiteralKind { + updatedEntries = append(updatedEntries, e) + continue + } + optElemVal, ok := val.AsLiteral().(*types.Optional) + if !ok { + updatedEntries = append(updatedEntries, e) + continue + } + // When the key is not a literal, but the value is, then it needs to be + // restored to an optional value. 
+ if key.Kind() != ast.LiteralKind { + undoOptVal, err := adaptLiteral(ctx, optElemVal) + if err != nil { + ctx.ReportErrorAtID(val.ID(), "invalid map value literal %v: %v", optElemVal, err) + } + ctx.UpdateExpr(val, undoOptVal) + updatedEntries = append(updatedEntries, e) + continue + } + modified = true + if !optElemVal.HasValue() { + continue + } + ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue())) + updatedEntry := ctx.NewMapEntry(key, val, false) + updatedEntries = append(updatedEntries, updatedEntry) + } + if modified { + ctx.UpdateExpr(e, ctx.NewMap(updatedEntries)) + } +} + +func pruneOptionalStructFields(ctx *OptimizerContext, e ast.Expr) { + s := e.AsStruct() + fields := s.Fields() + updatedFields := []ast.EntryExpr{} + modified := false + for _, f := range fields { + field := f.AsStructField() + val := field.Value() + if !field.IsOptional() || val.Kind() != ast.LiteralKind { + updatedFields = append(updatedFields, f) + continue + } + optElemVal, ok := val.AsLiteral().(*types.Optional) + if !ok { + updatedFields = append(updatedFields, f) + continue + } + modified = true + if !optElemVal.HasValue() { + continue + } + ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue())) + updatedField := ctx.NewStructField(field.Name(), val, false) + updatedFields = append(updatedFields, updatedField) + } + if modified { + ctx.UpdateExpr(e, ctx.NewStruct(s.TypeName(), updatedFields)) + } +} + +// adaptLiteral converts a runtime CEL value to its equivalent literal expression. +// +// For strongly typed values, the type-provider will be used to reconstruct the fields +// which are present in the literal and their equivalent initialization values. +func adaptLiteral(ctx *OptimizerContext, val ref.Val) (ast.Expr, error) { + switch t := val.Type().(type) { + case *types.Type: + switch t { + case types.BoolType, types.BytesType, types.DoubleType, types.IntType, + types.NullType, types.StringType, types.UintType: + return ctx.NewLiteral(val), nil + case types.DurationType: + return ctx.NewCall( + overloads.TypeConvertDuration, + ctx.NewLiteral(val.ConvertToType(types.StringType)), + ), nil + case types.TimestampType: + return ctx.NewCall( + overloads.TypeConvertTimestamp, + ctx.NewLiteral(val.ConvertToType(types.StringType)), + ), nil + case types.OptionalType: + opt := val.(*types.Optional) + if !opt.HasValue() { + return ctx.NewCall("optional.none"), nil + } + target, err := adaptLiteral(ctx, opt.GetValue()) + if err != nil { + return nil, err + } + return ctx.NewCall("optional.of", target), nil + case types.TypeType: + return ctx.NewIdent(val.(*types.Type).TypeName()), nil + case types.ListType: + l, ok := val.(traits.Lister) + if !ok { + return nil, fmt.Errorf("failed to adapt %v to literal", val) + } + elems := make([]ast.Expr, l.Size().(types.Int)) + idx := 0 + it := l.Iterator() + for it.HasNext() == types.True { + elemVal := it.Next() + elemExpr, err := adaptLiteral(ctx, elemVal) + if err != nil { + return nil, err + } + elems[idx] = elemExpr + idx++ + } + return ctx.NewList(elems, []int32{}), nil + case types.MapType: + m, ok := val.(traits.Mapper) + if !ok { + return nil, fmt.Errorf("failed to adapt %v to literal", val) + } + entries := make([]ast.EntryExpr, m.Size().(types.Int)) + idx := 0 + it := m.Iterator() + for it.HasNext() == types.True { + keyVal := it.Next() + keyExpr, err := adaptLiteral(ctx, keyVal) + if err != nil { + return nil, err + } + valVal := m.Get(keyVal) + valExpr, err := adaptLiteral(ctx, valVal) + if err != nil { + return nil, err + } + entries[idx] = 
ctx.NewMapEntry(keyExpr, valExpr, false) + idx++ + } + return ctx.NewMap(entries), nil + default: + provider := ctx.CELTypeProvider() + fields, found := provider.FindStructFieldNames(t.TypeName()) + if !found { + return nil, fmt.Errorf("failed to adapt %v to literal", val) + } + tester := val.(traits.FieldTester) + indexer := val.(traits.Indexer) + fieldInits := []ast.EntryExpr{} + for _, f := range fields { + field := types.String(f) + if tester.IsSet(field) != types.True { + continue + } + fieldVal := indexer.Get(field) + fieldExpr, err := adaptLiteral(ctx, fieldVal) + if err != nil { + return nil, err + } + fieldInits = append(fieldInits, ctx.NewStructField(f, fieldExpr, false)) + } + return ctx.NewStruct(t.TypeName(), fieldInits), nil + } + } + return nil, fmt.Errorf("failed to adapt %v to literal", val) +} + +// constantExprMatcher matches calls, select statements, and comprehensions whose arguments +// are all constant scalar or aggregate literal values. +// +// Only comprehensions which are not nested are included as possible constant folds, and only +// if all variables referenced in the comprehension stack exist are only iteration or +// accumulation variables. +func constantExprMatcher(e ast.NavigableExpr) bool { + switch e.Kind() { + case ast.CallKind: + return constantCallMatcher(e) + case ast.SelectKind: + sel := e.AsSelect() // guaranteed to be a navigable value + return constantMatcher(sel.Operand().(ast.NavigableExpr)) + case ast.ComprehensionKind: + if isNestedComprehension(e) { + return false + } + vars := map[string]bool{} + constantExprs := true + visitor := ast.NewExprVisitor(func(e ast.Expr) { + if e.Kind() == ast.ComprehensionKind { + nested := e.AsComprehension() + vars[nested.AccuVar()] = true + vars[nested.IterVar()] = true + } + if e.Kind() == ast.IdentKind && !vars[e.AsIdent()] { + constantExprs = false + } + }) + ast.PreOrderVisit(e, visitor) + return constantExprs + default: + return false + } +} + +// constantCallMatcher identifies strict and non-strict calls which can be folded. 
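// Hedged sketch of how the folding optimizer declared in this file might be
// wired up. NewConstantFoldingOptimizer and MaxConstantFoldIterations are
// defined above; NewStaticOptimizer is assumed from the optimizer.go file
// added alongside this one, and AstToString from io.go.
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(cel.Variable("x", cel.IntType))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`[1, 2, 3].size() + x`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}

	folder, err := cel.NewConstantFoldingOptimizer(cel.MaxConstantFoldIterations(100))
	if err != nil {
		panic(err)
	}
	opt := cel.NewStaticOptimizer(folder) // assumed entry point from optimizer.go
	folded, iss := opt.Optimize(env, ast)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	out, err := cel.AstToString(folded)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // the constant sub-expression should fold, e.g. to 3 + x
}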
+func constantCallMatcher(e ast.NavigableExpr) bool { + call := e.AsCall() + children := e.Children() + fnName := call.FunctionName() + if fnName == operators.LogicalAnd { + for _, child := range children { + if child.Kind() == ast.LiteralKind { + return true + } + } + } + if fnName == operators.LogicalOr { + for _, child := range children { + if child.Kind() == ast.LiteralKind { + return true + } + } + } + if fnName == operators.Conditional { + cond := children[0] + if cond.Kind() == ast.LiteralKind && cond.AsLiteral().Type() == types.BoolType { + return true + } + } + if fnName == operators.In { + haystack := children[1] + if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 { + return true + } + needle := children[0] + if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind { + needleValue := needle.AsLiteral() + list := haystack.AsList() + for _, e := range list.Elements() { + if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True { + return true + } + } + } + } + // convert all other calls with constant arguments + for _, child := range children { + if !constantMatcher(child) { + return false + } + } + return true +} + +func isNestedComprehension(e ast.NavigableExpr) bool { + parent, found := e.Parent() + for found { + if parent.Kind() == ast.ComprehensionKind { + return true + } + parent, found = parent.Parent() + } + return false +} + +func aggregateLiteralMatcher(e ast.NavigableExpr) bool { + return e.Kind() == ast.ListKind || e.Kind() == ast.MapKind || e.Kind() == ast.StructKind +} + +var ( + constantMatcher = ast.ConstantValueMatcher() +) + +const ( + defaultMaxConstantFoldIterations = 100 +) diff --git a/vendor/github.com/google/cel-go/cel/inlining.go b/vendor/github.com/google/cel-go/cel/inlining.go new file mode 100644 index 0000000000..78d5bea65b --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/inlining.go @@ -0,0 +1,228 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/traits" +) + +// InlineVariable holds a variable name to be matched and an AST representing +// the expression graph which should be used to replace it. +type InlineVariable struct { + name string + alias string + def *ast.AST +} + +// Name returns the qualified variable or field selection to replace. +func (v *InlineVariable) Name() string { + return v.name +} + +// Alias returns the alias to use when performing cel.bind() calls during inlining. +func (v *InlineVariable) Alias() string { + return v.alias +} + +// Expr returns the inlined expression value. +func (v *InlineVariable) Expr() ast.Expr { + return v.def.Expr() +} + +// Type indicates the inlined expression type. 
+func (v *InlineVariable) Type() *Type { + return v.def.GetType(v.def.Expr().ID()) +} + +// NewInlineVariable declares a variable name to be replaced by a checked expression. +func NewInlineVariable(name string, definition *Ast) *InlineVariable { + return NewInlineVariableWithAlias(name, name, definition) +} + +// NewInlineVariableWithAlias declares a variable name to be replaced by a checked expression. +// If the variable occurs more than once, the provided alias will be used to replace the expressions +// where the variable name occurs. +func NewInlineVariableWithAlias(name, alias string, definition *Ast) *InlineVariable { + return &InlineVariable{name: name, alias: alias, def: definition.impl} +} + +// NewInliningOptimizer creates and optimizer which replaces variables with expression definitions. +// +// If a variable occurs one time, the variable is replaced by the inline definition. If the +// variable occurs more than once, the variable occurences are replaced by a cel.bind() call. +func NewInliningOptimizer(inlineVars ...*InlineVariable) ASTOptimizer { + return &inliningOptimizer{variables: inlineVars} +} + +type inliningOptimizer struct { + variables []*InlineVariable +} + +func (opt *inliningOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST { + root := ast.NavigateAST(a) + for _, inlineVar := range opt.variables { + matches := ast.MatchDescendants(root, opt.matchVariable(inlineVar.Name())) + // Skip cases where the variable isn't in the expression graph + if len(matches) == 0 { + continue + } + + // For a single match, do a direct replacement of the expression sub-graph. + if len(matches) == 1 || !isBindable(matches, inlineVar.Expr(), inlineVar.Type()) { + for _, match := range matches { + // Copy the inlined AST expr and source info. + copyExpr := ctx.CopyASTAndMetadata(inlineVar.def) + opt.inlineExpr(ctx, match, copyExpr, inlineVar.Type()) + } + continue + } + + // For multiple matches, find the least common ancestor (lca) and insert the + // variable as a cel.bind() macro. + var lca ast.NavigableExpr = root + lcaAncestorCount := 0 + ancestors := map[int64]int{} + for _, match := range matches { + // Update the identifier matches with the provided alias. + parent, found := match, true + for found { + ancestorCount, hasAncestor := ancestors[parent.ID()] + if !hasAncestor { + ancestors[parent.ID()] = 1 + parent, found = parent.Parent() + continue + } + if lcaAncestorCount < ancestorCount || (lcaAncestorCount == ancestorCount && lca.Depth() < parent.Depth()) { + lca = parent + lcaAncestorCount = ancestorCount + } + ancestors[parent.ID()] = ancestorCount + 1 + parent, found = parent.Parent() + } + aliasExpr := ctx.NewIdent(inlineVar.Alias()) + opt.inlineExpr(ctx, match, aliasExpr, inlineVar.Type()) + } + + // Copy the inlined AST expr and source info. + copyExpr := ctx.CopyASTAndMetadata(inlineVar.def) + // Update the least common ancestor by inserting a cel.bind() call to the alias. + inlined, bindMacro := ctx.NewBindMacro(lca.ID(), inlineVar.Alias(), copyExpr, lca) + opt.inlineExpr(ctx, lca, inlined, inlineVar.Type()) + ctx.SetMacroCall(lca.ID(), bindMacro) + } + return a +} + +// inlineExpr replaces the current expression with the inlined one, unless the location of the inlining +// happens within a presence test, e.g. has(a.b.c) -> inline alpha for a.b.c in which case an attempt is +// made to determine whether the inlined value can be presence or existence tested. 
+func (opt *inliningOptimizer) inlineExpr(ctx *OptimizerContext, prev ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) { + switch prev.Kind() { + case ast.SelectKind: + sel := prev.AsSelect() + if !sel.IsTestOnly() { + ctx.UpdateExpr(prev, inlined) + return + } + opt.rewritePresenceExpr(ctx, prev, inlined, inlinedType) + default: + ctx.UpdateExpr(prev, inlined) + } +} + +// rewritePresenceExpr converts the inlined expression, when it occurs within a has() macro, to type-safe +// expression appropriate for the inlined type, if possible. +// +// If the rewrite is not possible an error is reported at the inline expression site. +func (opt *inliningOptimizer) rewritePresenceExpr(ctx *OptimizerContext, prev, inlined ast.Expr, inlinedType *Type) { + // If the input inlined expression is not a select expression it won't work with the has() + // macro. Attempt to rewrite the presence test in terms of the typed input, otherwise error. + if inlined.Kind() == ast.SelectKind { + presenceTest, hasMacro := ctx.NewHasMacro(prev.ID(), inlined) + ctx.UpdateExpr(prev, presenceTest) + ctx.SetMacroCall(prev.ID(), hasMacro) + return + } + + ctx.ClearMacroCall(prev.ID()) + if inlinedType.IsAssignableType(NullType) { + ctx.UpdateExpr(prev, + ctx.NewCall(operators.NotEquals, + inlined, + ctx.NewLiteral(types.NullValue), + )) + return + } + if inlinedType.HasTrait(traits.SizerType) { + ctx.UpdateExpr(prev, + ctx.NewCall(operators.NotEquals, + ctx.NewMemberCall(overloads.Size, inlined), + ctx.NewLiteral(types.IntZero), + )) + return + } + ctx.ReportErrorAtID(prev.ID(), "unable to inline expression type %v into presence test", inlinedType) +} + +// isBindable indicates whether the inlined type can be used within a cel.bind() if the expression +// being replaced occurs within a presence test. Value types with a size() method or field selection +// support can be bound. +// +// In future iterations, support may also be added for indexer types which can be rewritten as an `in` +// expression; however, this would imply a rewrite of the inlined expression that may not be necessary +// in most cases. +func isBindable(matches []ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) bool { + if inlinedType.IsAssignableType(NullType) || + inlinedType.HasTrait(traits.SizerType) { + return true + } + for _, m := range matches { + if m.Kind() != ast.SelectKind { + continue + } + sel := m.AsSelect() + if sel.IsTestOnly() { + return false + } + } + return true +} + +// matchVariable matches simple identifiers, select expressions, and presence test expressions +// which match the (potentially) qualified variable name provided as input. +// +// Note, this function does not support inlining against select expressions which includes optional +// field selection. This may be a future refinement. +func (opt *inliningOptimizer) matchVariable(varName string) ast.ExprMatcher { + return func(e ast.NavigableExpr) bool { + if e.Kind() == ast.IdentKind && e.AsIdent() == varName { + return true + } + if e.Kind() == ast.SelectKind { + sel := e.AsSelect() + // While the `ToQualifiedName` call could take the select directly, this + // would skip presence tests from possible matches, which we would like + // to include. 
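A hedged sketch of how the inlining optimizer above is meant to be driven: a pre-compiled definition replaces a qualified variable, and because the variable occurs more than once the rewrite goes through a `cel.bind()` call rather than duplicating the definition. The variable and expression names are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("request.user", cel.StringType),
		cel.Variable("claims", cel.MapType(cel.StringType, cel.StringType)),
	)
	if err != nil {
		panic(err)
	}
	// The definition that should stand in for `request.user`.
	userDef, iss := env.Compile(`claims["sub"]`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	target, iss := env.Compile(`request.user == "admin" || request.user.startsWith("svc-")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	inliner := cel.NewInliningOptimizer(cel.NewInlineVariable("request.user", userDef))
	inlined, iss := cel.NewStaticOptimizer(inliner).Optimize(env, target)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// The variable occurs twice, so the optimizer introduces a cel.bind() call.
	out, _ := cel.AstToString(inlined)
	fmt.Println(out)
}
```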
+ qualName, found := containers.ToQualifiedName(sel.Operand()) + return found && qualName+"."+sel.FieldName() == varName + } + return false + } +} diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go index 80f63140e3..3133fb9d7d 100644 --- a/vendor/github.com/google/cel-go/cel/io.go +++ b/vendor/github.com/google/cel-go/cel/io.go @@ -47,17 +47,11 @@ func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast { // // Prefer CheckedExprToAst if loading expressions from storage. func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) { - checkedAST, err := ast.CheckedExprToCheckedAST(checkedExpr) + checked, err := ast.ToAST(checkedExpr) if err != nil { return nil, err } - return &Ast{ - expr: checkedAST.Expr, - info: checkedAST.SourceInfo, - source: src, - refMap: checkedAST.ReferenceMap, - typeMap: checkedAST.TypeMap, - }, nil + return &Ast{source: src, impl: checked}, nil } // AstToCheckedExpr converts an Ast to an protobuf CheckedExpr value. @@ -67,13 +61,7 @@ func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) { if !a.IsChecked() { return nil, fmt.Errorf("cannot convert unchecked ast") } - cAst := &ast.CheckedAST{ - Expr: a.expr, - SourceInfo: a.info, - ReferenceMap: a.refMap, - TypeMap: a.typeMap, - } - return ast.CheckedASTToCheckedExpr(cAst) + return ast.ToProto(a.impl) } // ParsedExprToAst converts a parsed expression proto message to an Ast. @@ -89,18 +77,12 @@ func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast { // // Prefer ParsedExprToAst if loading expressions from storage. func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src Source) *Ast { - si := parsedExpr.GetSourceInfo() - if si == nil { - si = &exprpb.SourceInfo{} - } + info, _ := ast.ProtoToSourceInfo(parsedExpr.GetSourceInfo()) if src == nil { - src = common.NewInfoSource(si) - } - return &Ast{ - expr: parsedExpr.GetExpr(), - info: si, - source: src, + src = common.NewInfoSource(parsedExpr.GetSourceInfo()) } + e, _ := ast.ProtoToExpr(parsedExpr.GetExpr()) + return &Ast{source: src, impl: ast.NewAST(e, info)} } // AstToParsedExpr converts an Ast to an protobuf ParsedExpr value. @@ -116,9 +98,7 @@ func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) { // Note, the conversion may not be an exact replica of the original expression, but will produce // a string that is semantically equivalent and whose textual representation is stable. func AstToString(a *Ast) (string, error) { - expr := a.Expr() - info := a.SourceInfo() - return parser.Unparse(expr, info) + return parser.Unparse(a.impl.Expr(), a.impl.SourceInfo()) } // RefValueToValue converts between ref.Val and api.expr.Value. diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go index 4d232085c2..be59f1b028 100644 --- a/vendor/github.com/google/cel-go/cel/library.go +++ b/vendor/github.com/google/cel-go/cel/library.go @@ -20,6 +20,7 @@ import ( "strings" "time" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/stdlib" @@ -28,8 +29,6 @@ import ( "github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/interpreter" "github.com/google/cel-go/parser" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) const ( @@ -313,7 +312,7 @@ func (lib *optionalLib) CompileOptions() []EnvOption { Types(types.OptionalType), // Configure the optMap and optFlatMap macros. 
- Macros(NewReceiverMacro(optMapMacro, 2, optMap)), + Macros(ReceiverMacro(optMapMacro, 2, optMap)), // Global and member functions for working with optional values. Function(optionalOfFunc, @@ -374,7 +373,7 @@ func (lib *optionalLib) CompileOptions() []EnvOption { Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)), } if lib.version >= 1 { - opts = append(opts, Macros(NewReceiverMacro(optFlatMapMacro, 2, optFlatMap))) + opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap))) } return opts } @@ -386,57 +385,57 @@ func (lib *optionalLib) ProgramOptions() []ProgramOption { } } -func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { +func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) { varIdent := args[0] varName := "" - switch varIdent.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - varName = varIdent.GetIdentExpr().GetName() + switch varIdent.Kind() { + case ast.IdentKind: + varName = varIdent.AsIdent() default: - return nil, meh.NewError(varIdent.GetId(), "optMap() variable name must be a simple identifier") + return nil, meh.NewError(varIdent.ID(), "optMap() variable name must be a simple identifier") } mapExpr := args[1] - return meh.GlobalCall( + return meh.NewCall( operators.Conditional, - meh.ReceiverCall(hasValueFunc, target), - meh.GlobalCall(optionalOfFunc, - meh.Fold( - unusedIterVar, + meh.NewMemberCall(hasValueFunc, target), + meh.NewCall(optionalOfFunc, + meh.NewComprehension( meh.NewList(), + unusedIterVar, varName, - meh.ReceiverCall(valueFunc, target), - meh.LiteralBool(false), - meh.Ident(varName), + meh.NewMemberCall(valueFunc, meh.Copy(target)), + meh.NewLiteral(types.False), + meh.NewIdent(varName), mapExpr, ), ), - meh.GlobalCall(optionalNoneFunc), + meh.NewCall(optionalNoneFunc), ), nil } -func optFlatMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { +func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) { varIdent := args[0] varName := "" - switch varIdent.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - varName = varIdent.GetIdentExpr().GetName() + switch varIdent.Kind() { + case ast.IdentKind: + varName = varIdent.AsIdent() default: - return nil, meh.NewError(varIdent.GetId(), "optFlatMap() variable name must be a simple identifier") + return nil, meh.NewError(varIdent.ID(), "optFlatMap() variable name must be a simple identifier") } mapExpr := args[1] - return meh.GlobalCall( + return meh.NewCall( operators.Conditional, - meh.ReceiverCall(hasValueFunc, target), - meh.Fold( - unusedIterVar, + meh.NewMemberCall(hasValueFunc, target), + meh.NewComprehension( meh.NewList(), + unusedIterVar, varName, - meh.ReceiverCall(valueFunc, target), - meh.LiteralBool(false), - meh.Ident(varName), + meh.NewMemberCall(valueFunc, meh.Copy(target)), + meh.NewLiteral(types.False), + meh.NewIdent(varName), mapExpr, ), - meh.GlobalCall(optionalNoneFunc), + meh.NewCall(optionalNoneFunc), ), nil } @@ -447,6 +446,12 @@ func enableOptionalSyntax() EnvOption { } } +// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field +// selection is performed on a primitive type. 
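The optMap/optFlatMap expanders above are now written against the `ast.Expr` factory API, but their behavior at the expression level is unchanged. A small sketch, assuming the optional syntax and macros are enabled via `cel.OptionalTypes()`:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.OptionalTypes(),
		cel.Variable("m", cel.MapType(cel.StringType, cel.StringType)),
	)
	if err != nil {
		panic(err)
	}
	// optMap only evaluates the transform when the optional carries a value.
	checked, iss := env.Compile(`m.?key.optMap(v, v + "!").orValue("missing")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(checked)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"m": map[string]string{"key": "hello"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // hello!
}
```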
+func EnableErrorOnBadPresenceTest(value bool) EnvOption { + return features(featureEnableErrorOnBadPresenceTest, value) +} + func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) { call, ok := i.(interpreter.InterpretableCall) if !ok { diff --git a/vendor/github.com/google/cel-go/cel/macro.go b/vendor/github.com/google/cel-go/cel/macro.go index 1eb414c8be..4db1fd57a9 100644 --- a/vendor/github.com/google/cel-go/cel/macro.go +++ b/vendor/github.com/google/cel-go/cel/macro.go @@ -15,6 +15,11 @@ package cel import ( + "fmt" + + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" "github.com/google/cel-go/parser" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -26,7 +31,14 @@ import ( // a Macro should be created per arg-count or as a var arg macro. type Macro = parser.Macro -// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree. +// MacroFactory defines an expansion function which converts a call and its arguments to a cel.Expr value. +type MacroFactory = parser.MacroExpander + +// MacroExprFactory assists with the creation of Expr values in a manner which is consistent +// the internal semantics and id generation behaviors of the parser and checker libraries. +type MacroExprFactory = parser.ExprHelper + +// MacroExpander converts a call and its associated arguments into a protobuf Expr representation. // // If the MacroExpander determines within the implementation that an expansion is not needed it may return // a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments @@ -36,48 +48,197 @@ type Macro = parser.Macro // and produces as output an Expr ast node. // // Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil. -type MacroExpander = parser.MacroExpander +type MacroExpander func(eh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) // MacroExprHelper exposes helper methods for creating new expressions within a CEL abstract syntax tree. -type MacroExprHelper = parser.ExprHelper +// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is +// consistent with the source position and expression id generation code leveraged by both +// the parser and type-checker. +type MacroExprHelper interface { + // Copy the input expression with a brand new set of identifiers. + Copy(*exprpb.Expr) *exprpb.Expr + + // LiteralBool creates an Expr value for a bool literal. + LiteralBool(value bool) *exprpb.Expr + + // LiteralBytes creates an Expr value for a byte literal. + LiteralBytes(value []byte) *exprpb.Expr + + // LiteralDouble creates an Expr value for double literal. + LiteralDouble(value float64) *exprpb.Expr + + // LiteralInt creates an Expr value for an int literal. + LiteralInt(value int64) *exprpb.Expr + + // LiteralString creates am Expr value for a string literal. + LiteralString(value string) *exprpb.Expr + + // LiteralUint creates an Expr value for a uint literal. + LiteralUint(value uint64) *exprpb.Expr + + // NewList creates a CreateList instruction where the list is comprised of the optional set + // of elements provided as arguments. + NewList(elems ...*exprpb.Expr) *exprpb.Expr + + // NewMap creates a CreateStruct instruction for a map where the map is comprised of the + // optional set of key, value entries. 
+ NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr + + // NewMapEntry creates a Map Entry for the key, value pair. + NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry + + // NewObject creates a CreateStruct instruction for an object with a given type name and + // optional set of field initializers. + NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr + + // NewObjectFieldInit creates a new Object field initializer from the field name and value. + NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry + + // Fold creates a fold comprehension instruction. + // + // - iterVar is the iteration variable name. + // - iterRange represents the expression that resolves to a list or map where the elements or + // keys (respectively) will be iterated over. + // - accuVar is the accumulation variable name, typically parser.AccumulatorName. + // - accuInit is the initial expression whose value will be set for the accuVar prior to + // folding. + // - condition is the expression to test to determine whether to continue folding. + // - step is the expression to evaluation at the conclusion of a single fold iteration. + // - result is the computation to evaluate at the conclusion of the fold. + // + // The accuVar should not shadow variable names that you would like to reference within the + // environment in the step and condition expressions. Presently, the name __result__ is commonly + // used by built-in macros but this may change in the future. + Fold(iterVar string, + iterRange *exprpb.Expr, + accuVar string, + accuInit *exprpb.Expr, + condition *exprpb.Expr, + step *exprpb.Expr, + result *exprpb.Expr) *exprpb.Expr + + // Ident creates an identifier Expr value. + Ident(name string) *exprpb.Expr + + // AccuIdent returns an accumulator identifier for use with comprehension results. + AccuIdent() *exprpb.Expr + + // GlobalCall creates a function call Expr value for a global (free) function. + GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr + + // ReceiverCall creates a function call Expr value for a receiver-style function. + ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr + + // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics. + PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr + + // Select create a field traversal Expr value. + Select(operand *exprpb.Expr, field string) *exprpb.Expr + + // OffsetLocation returns the Location of the expression identifier. + OffsetLocation(exprID int64) common.Location + + // NewError associates an error message with a given expression id. + NewError(exprID int64, message string) *Error +} + +// GlobalMacro creates a Macro for a global function with the specified arg count. +func GlobalMacro(function string, argCount int, factory MacroFactory) Macro { + return parser.NewGlobalMacro(function, argCount, factory) +} + +// ReceiverMacro creates a Macro for a receiver function matching the specified arg count. +func ReceiverMacro(function string, argCount int, factory MacroFactory) Macro { + return parser.NewReceiverMacro(function, argCount, factory) +} + +// GlobalVarArgMacro creates a Macro for a global function with a variable arg count. 
+func GlobalVarArgMacro(function string, factory MacroFactory) Macro { + return parser.NewGlobalVarArgMacro(function, factory) +} + +// ReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count. +func ReceiverVarArgMacro(function string, factory MacroFactory) Macro { + return parser.NewReceiverVarArgMacro(function, factory) +} // NewGlobalMacro creates a Macro for a global function with the specified arg count. +// +// Deprecated: use GlobalMacro func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro { - return parser.NewGlobalMacro(function, argCount, expander) + expand := adaptingExpander{expander} + return parser.NewGlobalMacro(function, argCount, expand.Expander) } // NewReceiverMacro creates a Macro for a receiver function matching the specified arg count. +// +// Deprecated: use ReceiverMacro func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro { - return parser.NewReceiverMacro(function, argCount, expander) + expand := adaptingExpander{expander} + return parser.NewReceiverMacro(function, argCount, expand.Expander) } // NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count. +// +// Deprecated: use GlobalVarArgMacro func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro { - return parser.NewGlobalVarArgMacro(function, expander) + expand := adaptingExpander{expander} + return parser.NewGlobalVarArgMacro(function, expand.Expander) } // NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count. +// +// Deprecated: use ReceiverVarArgMacro func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro { - return parser.NewReceiverVarArgMacro(function, expander) + expand := adaptingExpander{expander} + return parser.NewReceiverVarArgMacro(function, expand.Expander) } // HasMacroExpander expands the input call arguments into a presence test, e.g. 
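A hedged sketch of the new factory-style macro constructors above: the expander is written against `MacroExprFactory` and `ast.Expr` rather than the protobuf expression types. The `square()` macro and its expansion are hypothetical, not part of this patch.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/operators"
)

// squareMacro expands the hypothetical square(x) into x * x using the
// MacroExprFactory rather than protobuf expression builders.
func squareMacro(fac cel.MacroExprFactory, _ ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) {
	arg := args[0]
	// Copy() gives the second occurrence of the argument a fresh set of ids.
	return fac.NewCall(operators.Multiply, arg, fac.Copy(arg)), nil
}

func main() {
	env, err := cel.NewEnv(
		cel.Variable("x", cel.IntType),
		cel.Macros(cel.GlobalMacro("square", 1, squareMacro)),
	)
	if err != nil {
		panic(err)
	}
	a, iss := env.Compile(`square(x) + 1`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(a)
	if err != nil {
		panic(err)
	}
	out, _, _ := prg.Eval(map[string]any{"x": int64(4)})
	fmt.Println(out) // 17
}
```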
has(.field) func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { - return parser.MakeHas(meh, target, args) + ph, err := toParserHelper(meh) + if err != nil { + return nil, err + } + arg, err := adaptToExpr(args[0]) + if err != nil { + return nil, err + } + if arg.Kind() == ast.SelectKind { + s := arg.AsSelect() + return adaptToProto(ph.NewPresenceTest(s.Operand(), s.FieldName())) + } + return nil, ph.NewError(arg.ID(), "invalid argument to has() macro") } // ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the // elements in the range match the predicate expressions: // .exists(, ) func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { - return parser.MakeExists(meh, target, args) + ph, err := toParserHelper(meh) + if err != nil { + return nil, err + } + out, err := parser.MakeExists(ph, mustAdaptToExpr(target), mustAdaptToExprs(args)) + if err != nil { + return nil, err + } + return adaptToProto(out) } // ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly // one of the elements in the range match the predicate expressions: // .exists_one(, ) func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { - return parser.MakeExistsOne(meh, target, args) + ph, err := toParserHelper(meh) + if err != nil { + return nil, err + } + out, err := parser.MakeExistsOne(ph, mustAdaptToExpr(target), mustAdaptToExprs(args)) + if err != nil { + return nil, err + } + return adaptToProto(out) } // MapMacroExpander expands the input call arguments into a comprehension that transforms each element in the @@ -91,14 +252,30 @@ func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*ex // In the second form only iterVar values which return true when provided to the predicate expression // are transformed. 
func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { - return parser.MakeMap(meh, target, args) + ph, err := toParserHelper(meh) + if err != nil { + return nil, err + } + out, err := parser.MakeMap(ph, mustAdaptToExpr(target), mustAdaptToExprs(args)) + if err != nil { + return nil, err + } + return adaptToProto(out) } // FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains // only elements which match the provided predicate expression: // .filter(, ) func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { - return parser.MakeFilter(meh, target, args) + ph, err := toParserHelper(meh) + if err != nil { + return nil, err + } + out, err := parser.MakeFilter(ph, mustAdaptToExpr(target), mustAdaptToExprs(args)) + if err != nil { + return nil, err + } + return adaptToProto(out) } var ( @@ -142,3 +319,258 @@ var ( // NoMacros provides an alias to an empty list of macros NoMacros = []Macro{} ) + +type adaptingExpander struct { + legacyExpander MacroExpander +} + +func (adapt *adaptingExpander) Expander(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { + var legacyTarget *exprpb.Expr = nil + var err *Error = nil + if target != nil { + legacyTarget, err = adaptToProto(target) + if err != nil { + return nil, err + } + } + legacyArgs := make([]*exprpb.Expr, len(args)) + for i, arg := range args { + legacyArgs[i], err = adaptToProto(arg) + if err != nil { + return nil, err + } + } + ah := &adaptingHelper{modernHelper: eh} + legacyExpr, err := adapt.legacyExpander(ah, legacyTarget, legacyArgs) + if err != nil { + return nil, err + } + ex, err := adaptToExpr(legacyExpr) + if err != nil { + return nil, err + } + return ex, nil +} + +func wrapErr(id int64, message string, err error) *common.Error { + return &common.Error{ + Location: common.NoLocation, + Message: fmt.Sprintf("%s: %v", message, err), + ExprID: id, + } +} + +type adaptingHelper struct { + modernHelper parser.ExprHelper +} + +// Copy the input expression with a brand new set of identifiers. +func (ah *adaptingHelper) Copy(e *exprpb.Expr) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.Copy(mustAdaptToExpr(e))) +} + +// LiteralBool creates an Expr value for a bool literal. +func (ah *adaptingHelper) LiteralBool(value bool) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bool(value))) +} + +// LiteralBytes creates an Expr value for a byte literal. +func (ah *adaptingHelper) LiteralBytes(value []byte) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bytes(value))) +} + +// LiteralDouble creates an Expr value for double literal. +func (ah *adaptingHelper) LiteralDouble(value float64) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Double(value))) +} + +// LiteralInt creates an Expr value for an int literal. +func (ah *adaptingHelper) LiteralInt(value int64) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Int(value))) +} + +// LiteralString creates am Expr value for a string literal. +func (ah *adaptingHelper) LiteralString(value string) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.String(value))) +} + +// LiteralUint creates an Expr value for a uint literal. 
+func (ah *adaptingHelper) LiteralUint(value uint64) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Uint(value))) +} + +// NewList creates a CreateList instruction where the list is comprised of the optional set +// of elements provided as arguments. +func (ah *adaptingHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewList(mustAdaptToExprs(elems)...)) +} + +// NewMap creates a CreateStruct instruction for a map where the map is comprised of the +// optional set of key, value entries. +func (ah *adaptingHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { + adaptedEntries := make([]ast.EntryExpr, len(entries)) + for i, e := range entries { + adaptedEntries[i] = mustAdaptToEntryExpr(e) + } + return mustAdaptToProto(ah.modernHelper.NewMap(adaptedEntries...)) +} + +// NewMapEntry creates a Map Entry for the key, value pair. +func (ah *adaptingHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { + return mustAdaptToProtoEntry( + ah.modernHelper.NewMapEntry(mustAdaptToExpr(key), mustAdaptToExpr(val), optional)) +} + +// NewObject creates a CreateStruct instruction for an object with a given type name and +// optional set of field initializers. +func (ah *adaptingHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { + adaptedEntries := make([]ast.EntryExpr, len(fieldInits)) + for i, e := range fieldInits { + adaptedEntries[i] = mustAdaptToEntryExpr(e) + } + return mustAdaptToProto(ah.modernHelper.NewStruct(typeName, adaptedEntries...)) +} + +// NewObjectFieldInit creates a new Object field initializer from the field name and value. +func (ah *adaptingHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { + return mustAdaptToProtoEntry( + ah.modernHelper.NewStructField(field, mustAdaptToExpr(init), optional)) +} + +// Fold creates a fold comprehension instruction. +// +// - iterVar is the iteration variable name. +// - iterRange represents the expression that resolves to a list or map where the elements or +// keys (respectively) will be iterated over. +// - accuVar is the accumulation variable name, typically parser.AccumulatorName. +// - accuInit is the initial expression whose value will be set for the accuVar prior to +// folding. +// - condition is the expression to test to determine whether to continue folding. +// - step is the expression to evaluation at the conclusion of a single fold iteration. +// - result is the computation to evaluate at the conclusion of the fold. +// +// The accuVar should not shadow variable names that you would like to reference within the +// environment in the step and condition expressions. Presently, the name __result__ is commonly +// used by built-in macros but this may change in the future. +func (ah *adaptingHelper) Fold(iterVar string, + iterRange *exprpb.Expr, + accuVar string, + accuInit *exprpb.Expr, + condition *exprpb.Expr, + step *exprpb.Expr, + result *exprpb.Expr) *exprpb.Expr { + return mustAdaptToProto( + ah.modernHelper.NewComprehension( + mustAdaptToExpr(iterRange), + iterVar, + accuVar, + mustAdaptToExpr(accuInit), + mustAdaptToExpr(condition), + mustAdaptToExpr(step), + mustAdaptToExpr(result), + ), + ) +} + +// Ident creates an identifier Expr value. 
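The adapting helper shown above exists so that expanders written against the deprecated proto-based signature keep working. The same hypothetical `square()` macro from the earlier sketch, this time in its legacy form, registered through the deprecated `cel.NewGlobalMacro` constructor that now wraps it in the adapter:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/operators"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// legacySquare implements the deprecated proto-based MacroExpander signature;
// the adapting expander/helper pair converts its inputs and outputs on the fly.
func legacySquare(meh cel.MacroExprHelper, _ *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
	return meh.GlobalCall(operators.Multiply, args[0], meh.Copy(args[0])), nil
}

func main() {
	env, err := cel.NewEnv(
		cel.Variable("x", cel.IntType),
		cel.Macros(cel.NewGlobalMacro("square", 1, legacySquare)),
	)
	if err != nil {
		panic(err)
	}
	a, iss := env.Compile(`square(x)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(a)
	if err != nil {
		panic(err)
	}
	out, _, _ := prg.Eval(map[string]any{"x": int64(6)})
	fmt.Println(out) // 36
}
```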
+func (ah *adaptingHelper) Ident(name string) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewIdent(name)) +} + +// AccuIdent returns an accumulator identifier for use with comprehension results. +func (ah *adaptingHelper) AccuIdent() *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewAccuIdent()) +} + +// GlobalCall creates a function call Expr value for a global (free) function. +func (ah *adaptingHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewCall(function, mustAdaptToExprs(args)...)) +} + +// ReceiverCall creates a function call Expr value for a receiver-style function. +func (ah *adaptingHelper) ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr { + return mustAdaptToProto( + ah.modernHelper.NewMemberCall(function, mustAdaptToExpr(target), mustAdaptToExprs(args)...)) +} + +// PresenceTest creates a Select TestOnly Expr value for modelling has() semantics. +func (ah *adaptingHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr { + op := mustAdaptToExpr(operand) + return mustAdaptToProto(ah.modernHelper.NewPresenceTest(op, field)) +} + +// Select create a field traversal Expr value. +func (ah *adaptingHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr { + op := mustAdaptToExpr(operand) + return mustAdaptToProto(ah.modernHelper.NewSelect(op, field)) +} + +// OffsetLocation returns the Location of the expression identifier. +func (ah *adaptingHelper) OffsetLocation(exprID int64) common.Location { + return ah.modernHelper.OffsetLocation(exprID) +} + +// NewError associates an error message with a given expression id. +func (ah *adaptingHelper) NewError(exprID int64, message string) *Error { + return ah.modernHelper.NewError(exprID, message) +} + +func mustAdaptToExprs(exprs []*exprpb.Expr) []ast.Expr { + adapted := make([]ast.Expr, len(exprs)) + for i, e := range exprs { + adapted[i] = mustAdaptToExpr(e) + } + return adapted +} + +func mustAdaptToExpr(e *exprpb.Expr) ast.Expr { + out, _ := adaptToExpr(e) + return out +} + +func adaptToExpr(e *exprpb.Expr) (ast.Expr, *Error) { + if e == nil { + return nil, nil + } + out, err := ast.ProtoToExpr(e) + if err != nil { + return nil, wrapErr(e.GetId(), "proto conversion failure", err) + } + return out, nil +} + +func mustAdaptToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) ast.EntryExpr { + out, _ := ast.ProtoToEntryExpr(e) + return out +} + +func mustAdaptToProto(e ast.Expr) *exprpb.Expr { + out, _ := adaptToProto(e) + return out +} + +func adaptToProto(e ast.Expr) (*exprpb.Expr, *Error) { + if e == nil { + return nil, nil + } + out, err := ast.ExprToProto(e) + if err != nil { + return nil, wrapErr(e.ID(), "expr conversion failure", err) + } + return out, nil +} + +func mustAdaptToProtoEntry(e ast.EntryExpr) *exprpb.Expr_CreateStruct_Entry { + out, _ := ast.EntryExprToProto(e) + return out +} + +func toParserHelper(meh MacroExprHelper) (parser.ExprHelper, *Error) { + ah, ok := meh.(*adaptingHelper) + if !ok { + return nil, common.NewError(0, + fmt.Sprintf("unsupported macro helper: %v (%T)", meh, meh), + common.NoLocation) + } + return ah.modernHelper, nil +} diff --git a/vendor/github.com/google/cel-go/cel/optimizer.go b/vendor/github.com/google/cel-go/cel/optimizer.go new file mode 100644 index 0000000000..ec02773a45 --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/optimizer.go @@ -0,0 +1,525 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "sort" + + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// StaticOptimizer contains a sequence of ASTOptimizer instances which will be applied in order. +// +// The static optimizer normalizes expression ids and type-checking run between optimization +// passes to ensure that the final optimized output is a valid expression with metadata consistent +// with what would have been generated from a parsed and checked expression. +// +// Note: source position information is best-effort and likely wrong, but optimized expressions +// should be suitable for calls to parser.Unparse. +type StaticOptimizer struct { + optimizers []ASTOptimizer +} + +// NewStaticOptimizer creates a StaticOptimizer with a sequence of ASTOptimizer's to be applied +// to a checked expression. +func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer { + return &StaticOptimizer{ + optimizers: optimizers, + } +} + +// Optimize applies a sequence of optimizations to an Ast within a given environment. +// +// If issues are encountered, the Issues.Err() return value will be non-nil. +func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) { + // Make a copy of the AST to be optimized. + optimized := ast.Copy(a.impl) + ids := newIDGenerator(ast.MaxID(a.impl)) + + // Create the optimizer context, could be pooled in the future. + issues := NewIssues(common.NewErrors(a.Source())) + baseFac := ast.NewExprFactory() + exprFac := &optimizerExprFactory{ + idGenerator: ids, + fac: baseFac, + sourceInfo: optimized.SourceInfo(), + } + ctx := &OptimizerContext{ + optimizerExprFactory: exprFac, + Env: env, + Issues: issues, + } + + // Apply the optimizations sequentially. + for _, o := range opt.optimizers { + optimized = o.Optimize(ctx, optimized) + if issues.Err() != nil { + return nil, issues + } + // Normalize expression id metadata including coordination with macro call metadata. + freshIDGen := newIDGenerator(0) + info := optimized.SourceInfo() + expr := optimized.Expr() + normalizeIDs(freshIDGen.renumberStable, expr, info) + cleanupMacroRefs(expr, info) + + // Recheck the updated expression for any possible type-agreement or validation errors. + parsed := &Ast{ + source: a.Source(), + impl: ast.NewAST(expr, info)} + checked, iss := ctx.Check(parsed) + if iss.Err() != nil { + return nil, iss + } + optimized = checked.impl + } + + // Return the optimized result. + return &Ast{ + source: a.Source(), + impl: optimized, + }, nil +} + +// normalizeIDs ensures that the metadata present with an AST is reset in a manner such +// that the ids within the expression correspond to the ids within macros. 
+func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) { + optimized.RenumberIDs(idGen) + if len(info.MacroCalls()) == 0 { + return + } + + // Sort the macro ids to make sure that the renumbering of macro-specific variables + // is stable across normalization calls. + sortedMacroIDs := []int64{} + for id := range info.MacroCalls() { + sortedMacroIDs = append(sortedMacroIDs, id) + } + sort.Slice(sortedMacroIDs, func(i, j int) bool { return sortedMacroIDs[i] < sortedMacroIDs[j] }) + + // First, update the macro call ids themselves. + callIDMap := map[int64]int64{} + for _, id := range sortedMacroIDs { + callIDMap[id] = idGen(id) + } + // Then update the macro call definitions which refer to these ids, but + // ensure that the updates don't collide and remove macro entries which haven't + // been visited / updated yet. + type macroUpdate struct { + id int64 + call ast.Expr + } + macroUpdates := []macroUpdate{} + for _, oldID := range sortedMacroIDs { + newID := callIDMap[oldID] + call, found := info.GetMacroCall(oldID) + if !found { + continue + } + call.RenumberIDs(idGen) + macroUpdates = append(macroUpdates, macroUpdate{id: newID, call: call}) + info.ClearMacroCall(oldID) + } + for _, u := range macroUpdates { + info.SetMacroCall(u.id, u.call) + } +} + +func cleanupMacroRefs(expr ast.Expr, info *ast.SourceInfo) { + if len(info.MacroCalls()) == 0 { + return + } + + // Sanitize the macro call references once the optimized expression has been computed + // and the ids normalized between the expression and the macros. + exprRefMap := make(map[int64]struct{}) + ast.PostOrderVisit(expr, ast.NewExprVisitor(func(e ast.Expr) { + if e.ID() == 0 { + return + } + exprRefMap[e.ID()] = struct{}{} + })) + // Update the macro call id references to ensure that macro pointers are + // updated consistently across macros. + for _, call := range info.MacroCalls() { + ast.PostOrderVisit(call, ast.NewExprVisitor(func(e ast.Expr) { + if e.ID() == 0 { + return + } + exprRefMap[e.ID()] = struct{}{} + })) + } + for id := range info.MacroCalls() { + if _, found := exprRefMap[id]; !found { + info.ClearMacroCall(id) + } + } +} + +// newIDGenerator ensures that new ids are only created the first time they are encountered. +func newIDGenerator(seed int64) *idGenerator { + return &idGenerator{ + idMap: make(map[int64]int64), + seed: seed, + } +} + +type idGenerator struct { + idMap map[int64]int64 + seed int64 +} + +func (gen *idGenerator) nextID() int64 { + gen.seed++ + return gen.seed +} + +func (gen *idGenerator) renumberStable(id int64) int64 { + if id == 0 { + return 0 + } + if newID, found := gen.idMap[id]; found { + return newID + } + nextID := gen.nextID() + gen.idMap[id] = nextID + return nextID +} + +// OptimizerContext embeds Env and Issues instances to make it easy to type-check and evaluate +// subexpressions and report any errors encountered along the way. The context also embeds the +// optimizerExprFactory which can be used to generate new sub-expressions with expression ids +// consistent with the expectations of a parsed expression. +type OptimizerContext struct { + *Env + *optimizerExprFactory + *Issues +} + +// ASTOptimizer applies an optimization over an AST and returns the optimized result. +type ASTOptimizer interface { + // Optimize optimizes a type-checked AST within an Environment and accumulates any issues. 
+ Optimize(*OptimizerContext, *ast.AST) *ast.AST +} + +type optimizerExprFactory struct { + *idGenerator + fac ast.ExprFactory + sourceInfo *ast.SourceInfo +} + +// NewAST creates an AST from the current expression using the tracked source info which +// is modified and managed by the OptimizerContext. +func (opt *optimizerExprFactory) NewAST(expr ast.Expr) *ast.AST { + return ast.NewAST(expr, opt.sourceInfo) +} + +// CopyAST creates a renumbered copy of `Expr` and `SourceInfo` values of the input AST, where the +// renumbering uses the same scheme as the core optimizer logic ensuring there are no collisions +// between copies. +// +// Use this method before attempting to merge the expression from AST into another. +func (opt *optimizerExprFactory) CopyAST(a *ast.AST) (ast.Expr, *ast.SourceInfo) { + idGen := newIDGenerator(opt.nextID()) + defer func() { opt.seed = idGen.nextID() }() + copyExpr := opt.fac.CopyExpr(a.Expr()) + copyInfo := ast.CopySourceInfo(a.SourceInfo()) + normalizeIDs(idGen.renumberStable, copyExpr, copyInfo) + return copyExpr, copyInfo +} + +// CopyASTAndMetadata copies the input AST and propagates the macro metadata into the AST being +// optimized. +func (opt *optimizerExprFactory) CopyASTAndMetadata(a *ast.AST) ast.Expr { + copyExpr, copyInfo := opt.CopyAST(a) + for macroID, call := range copyInfo.MacroCalls() { + opt.SetMacroCall(macroID, call) + } + return copyExpr +} + +// ClearMacroCall clears the macro at the given expression id. +func (opt *optimizerExprFactory) ClearMacroCall(id int64) { + opt.sourceInfo.ClearMacroCall(id) +} + +// SetMacroCall sets the macro call metadata for the given macro id within the tracked source info +// metadata. +func (opt *optimizerExprFactory) SetMacroCall(id int64, expr ast.Expr) { + opt.sourceInfo.SetMacroCall(id, expr) +} + +// MacroCalls returns the map of macro calls currently in the context. +func (opt *optimizerExprFactory) MacroCalls() map[int64]ast.Expr { + return opt.sourceInfo.MacroCalls() +} + +// NewBindMacro creates an AST expression representing the expanded bind() macro, and a macro expression +// representing the unexpanded call signature to be inserted into the source info macro call metadata. +func (opt *optimizerExprFactory) NewBindMacro(macroID int64, varName string, varInit, remaining ast.Expr) (astExpr, macroExpr ast.Expr) { + varID := opt.nextID() + remainingID := opt.nextID() + remaining = opt.fac.CopyExpr(remaining) + remaining.RenumberIDs(func(id int64) int64 { + if id == macroID { + return remainingID + } + return id + }) + if call, exists := opt.sourceInfo.GetMacroCall(macroID); exists { + opt.SetMacroCall(remainingID, opt.fac.CopyExpr(call)) + } + + astExpr = opt.fac.NewComprehension(macroID, + opt.fac.NewList(opt.nextID(), []ast.Expr{}, []int32{}), + "#unused", + varName, + opt.fac.CopyExpr(varInit), + opt.fac.NewLiteral(opt.nextID(), types.False), + opt.fac.NewIdent(varID, varName), + remaining) + + macroExpr = opt.fac.NewMemberCall(0, "bind", + opt.fac.NewIdent(opt.nextID(), "cel"), + opt.fac.NewIdent(varID, varName), + opt.fac.CopyExpr(varInit), + opt.fac.CopyExpr(remaining)) + opt.sanitizeMacro(macroID, macroExpr) + return +} + +// NewCall creates a global function call invocation expression. +// +// Example: +// +// countByField(list, fieldName) +// - function: countByField +// - args: [list, fieldName] +func (opt *optimizerExprFactory) NewCall(function string, args ...ast.Expr) ast.Expr { + return opt.fac.NewCall(opt.nextID(), function, args...) 
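A hedged sketch of a custom ASTOptimizer built on the OptimizerContext and expression factory described above; the `now()` function it rewrites is hypothetical. It uses only helpers visible in this patch (ast.NavigateAST, ast.MatchDescendants, ast.FunctionMatcher, UpdateExpr, NewLiteral).

```go
package example

import (
	"time"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/types"
)

// nowFolder rewrites calls to a hypothetical now() function into a fixed
// timestamp literal so that later passes can treat them as constants.
type nowFolder struct{}

var _ cel.ASTOptimizer = nowFolder{}

func (nowFolder) Optimize(ctx *cel.OptimizerContext, a *ast.AST) *ast.AST {
	root := ast.NavigateAST(a)
	for _, call := range ast.MatchDescendants(root, ast.FunctionMatcher("now")) {
		// UpdateExpr swaps the matched call in place while keeping any macro
		// call metadata tracked by the source info consistent.
		ctx.UpdateExpr(call, ctx.NewLiteral(types.Timestamp{Time: time.Unix(0, 0).UTC()}))
	}
	return a
}

// The pass would then be applied via cel.NewStaticOptimizer(nowFolder{}).Optimize(env, checkedAst).
```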
+} + +// NewMemberCall creates a member function call invocation expression where 'target' is the receiver of the call. +// +// Example: +// +// list.countByField(fieldName) +// - function: countByField +// - target: list +// - args: [fieldName] +func (opt *optimizerExprFactory) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr { + return opt.fac.NewMemberCall(opt.nextID(), function, target, args...) +} + +// NewIdent creates a new identifier expression. +// +// Examples: +// +// - simple_var_name +// - qualified.subpackage.var_name +func (opt *optimizerExprFactory) NewIdent(name string) ast.Expr { + return opt.fac.NewIdent(opt.nextID(), name) +} + +// NewLiteral creates a new literal expression value. +// +// The range of valid values for a literal generated during optimization is different than for expressions +// generated via parsing / type-checking, as the ref.Val may be _any_ CEL value so long as the value can +// be converted back to a literal-like form. +func (opt *optimizerExprFactory) NewLiteral(value ref.Val) ast.Expr { + return opt.fac.NewLiteral(opt.nextID(), value) +} + +// NewList creates a list expression with a set of optional indices. +// +// Examples: +// +// [a, b] +// - elems: [a, b] +// - optIndices: [] +// +// [a, ?b, ?c] +// - elems: [a, b, c] +// - optIndices: [1, 2] +func (opt *optimizerExprFactory) NewList(elems []ast.Expr, optIndices []int32) ast.Expr { + return opt.fac.NewList(opt.nextID(), elems, optIndices) +} + +// NewMap creates a map from a set of entry expressions which contain a key and value expression. +func (opt *optimizerExprFactory) NewMap(entries []ast.EntryExpr) ast.Expr { + return opt.fac.NewMap(opt.nextID(), entries) +} + +// NewMapEntry creates a map entry with a key and value expression and a flag to indicate whether the +// entry is optional. +// +// Examples: +// +// {a: b} +// - key: a +// - value: b +// - optional: false +// +// {?a: ?b} +// - key: a +// - value: b +// - optional: true +func (opt *optimizerExprFactory) NewMapEntry(key, value ast.Expr, isOptional bool) ast.EntryExpr { + return opt.fac.NewMapEntry(opt.nextID(), key, value, isOptional) +} + +// NewHasMacro generates a test-only select expression to be included within an AST and an unexpanded +// has() macro call signature to be inserted into the source info macro call metadata. +func (opt *optimizerExprFactory) NewHasMacro(macroID int64, s ast.Expr) (astExpr, macroExpr ast.Expr) { + sel := s.AsSelect() + astExpr = opt.fac.NewPresenceTest(macroID, sel.Operand(), sel.FieldName()) + macroExpr = opt.fac.NewCall(0, "has", + opt.NewSelect(opt.fac.CopyExpr(sel.Operand()), sel.FieldName())) + opt.sanitizeMacro(macroID, macroExpr) + return +} + +// NewSelect creates a select expression where a field value is selected from an operand. +// +// Example: +// +// msg.field_name +// - operand: msg +// - field: field_name +func (opt *optimizerExprFactory) NewSelect(operand ast.Expr, field string) ast.Expr { + return opt.fac.NewSelect(opt.nextID(), operand, field) +} + +// NewStruct creates a new typed struct value with an set of field initializations. +// +// Example: +// +// pkg.TypeName{field: value} +// - typeName: pkg.TypeName +// - fields: [{field: value}] +func (opt *optimizerExprFactory) NewStruct(typeName string, fields []ast.EntryExpr) ast.Expr { + return opt.fac.NewStruct(opt.nextID(), typeName, fields) +} + +// NewStructField creates a struct field initialization. 
+// +// Examples: +// +// {count: 3u} +// - field: count +// - value: 3u +// - optional: false +// +// {?count: x} +// - field: count +// - value: x +// - optional: true +func (opt *optimizerExprFactory) NewStructField(field string, value ast.Expr, isOptional bool) ast.EntryExpr { + return opt.fac.NewStructField(opt.nextID(), field, value, isOptional) +} + +// UpdateExpr updates the target expression with the updated content while preserving macro metadata. +// +// There are four scenarios during the update to consider: +// 1. target is not macro, updated is not macro +// 2. target is macro, updated is not macro +// 3. target is macro, updated is macro +// 4. target is not macro, updated is macro +// +// When the target is a macro already, it may either be updated to a new macro function +// body if the update is also a macro, or it may be removed altogether if the update is +// a macro. +// +// When the update is a macro, then the target references within other macros must be +// updated to point to the new updated macro. Otherwise, other macros which pointed to +// the target body must be replaced with copies of the updated expression body. +func (opt *optimizerExprFactory) UpdateExpr(target, updated ast.Expr) { + // Update the expression + target.SetKindCase(updated) + + // Early return if there's no macros present sa the source info reflects the + // macro set from the target and updated expressions. + if len(opt.sourceInfo.MacroCalls()) == 0 { + return + } + // Determine whether the target expression was a macro. + _, targetIsMacro := opt.sourceInfo.GetMacroCall(target.ID()) + + // Determine whether the updated expression was a macro. + updatedMacro, updatedIsMacro := opt.sourceInfo.GetMacroCall(updated.ID()) + + if updatedIsMacro { + // If the updated call was a macro, then updated id maps to target id, + // and the updated macro moves into the target id slot. + opt.sourceInfo.ClearMacroCall(updated.ID()) + opt.sourceInfo.SetMacroCall(target.ID(), updatedMacro) + } else if targetIsMacro { + // Otherwise if the target expr was a macro, but is no longer, clear + // the macro reference. + opt.sourceInfo.ClearMacroCall(target.ID()) + } + + // Punch holes in the updated value where macros references exist. + macroExpr := opt.fac.CopyExpr(target) + macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) { + if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists { + e.SetKindCase(nil) + } + }) + ast.PostOrderVisit(macroExpr, macroRefVisitor) + + // Update any references to the expression within a macro + macroVisitor := ast.NewExprVisitor(func(call ast.Expr) { + // Update the target expression to point to the macro expression which + // will be empty if the updated expression was a macro. + if call.ID() == target.ID() { + call.SetKindCase(opt.fac.CopyExpr(macroExpr)) + } + // Update the macro call expression if it refers to the updated expression + // id which has since been remapped to the target id. + if call.ID() == updated.ID() { + // Either ensure the expression is a macro reference or a populated with + // the relevant sub-expression if the updated expr was not a macro. + if updatedIsMacro { + call.SetKindCase(nil) + } else { + call.SetKindCase(opt.fac.CopyExpr(macroExpr)) + } + // Since SetKindCase does not renumber the id, ensure the references to + // the old 'updated' id are mapped to the target id. 
+ call.RenumberIDs(func(id int64) int64 { + if id == updated.ID() { + return target.ID() + } + return id + }) + } + }) + for _, call := range opt.sourceInfo.MacroCalls() { + ast.PostOrderVisit(call, macroVisitor) + } +} + +func (opt *optimizerExprFactory) sanitizeMacro(macroID int64, macroExpr ast.Expr) { + macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) { + if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists && e.ID() != macroID { + e.SetKindCase(nil) + } + }) + ast.PostOrderVisit(macroExpr, macroRefVisitor) +} diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go index 05867730d3..69c6942634 100644 --- a/vendor/github.com/google/cel-go/cel/options.go +++ b/vendor/github.com/google/cel-go/cel/options.go @@ -61,6 +61,10 @@ const ( // compressing the logic graph to a single call when multiple like-operator // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d]) featureVariadicLogicalASTs + + // Enable error generation when a presence test or optional field selection is + // performed on a primitive type. + featureEnableErrorOnBadPresenceTest ) // EnvOption is a functional interface for configuring the environment. @@ -243,6 +247,13 @@ func Abbrevs(qualifiedNames ...string) EnvOption { } } +// customTypeRegistry is an internal-only interface containing the minimum methods required to support +// custom types. It is a subset of methods from ref.TypeRegistry. +type customTypeRegistry interface { + RegisterDescriptor(protoreflect.FileDescriptor) error + RegisterType(...ref.Type) error +} + // Types adds one or more type declarations to the environment, allowing for construction of // type-literals whose definitions are included in the common expression built-in set. // @@ -255,12 +266,7 @@ func Abbrevs(qualifiedNames ...string) EnvOption { // Note: This option must be specified after the CustomTypeProvider option when used together. func Types(addTypes ...any) EnvOption { return func(e *Env) (*Env, error) { - var reg ref.TypeRegistry - var isReg bool - reg, isReg = e.provider.(*types.Registry) - if !isReg { - reg, isReg = e.provider.(ref.TypeRegistry) - } + reg, isReg := e.provider.(customTypeRegistry) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -297,7 +303,7 @@ func Types(addTypes ...any) EnvOption { // extension or by re-using the same EnvOption with another NewEnv() call. 
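The Types and TypeDescs options above now only require the narrower customTypeRegistry interface from the provider; usage is unchanged. A hedged sketch registering a well-known proto message (the AttributeContext request type is just a convenient, commonly used example):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	rpcpb "google.golang.org/genproto/googleapis/rpc/context/attribute_context"
)

func main() {
	// Registering the message type goes through the provider's registry methods,
	// which is exactly what the customTypeRegistry interface captures.
	env, err := cel.NewEnv(
		cel.Types(&rpcpb.AttributeContext_Request{}),
		cel.Variable("request", cel.ObjectType("google.rpc.context.AttributeContext.Request")),
	)
	if err != nil {
		panic(err)
	}
	checked, iss := env.Compile(`request.host == "example.com"`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(checked)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{
		"request": &rpcpb.AttributeContext_Request{Host: "example.com"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```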
func TypeDescs(descs ...any) EnvOption { return func(e *Env) (*Env, error) { - reg, isReg := e.provider.(ref.TypeRegistry) + reg, isReg := e.provider.(customTypeRegistry) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -345,7 +351,7 @@ func TypeDescs(descs ...any) EnvOption { } } -func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) error { +func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) error { files, err := protodesc.NewFiles(fileSet) if err != nil { return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err) @@ -353,7 +359,7 @@ func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) er return registerFiles(reg, files) } -func registerFiles(reg ref.TypeRegistry, files *protoregistry.Files) error { +func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error { var err error files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { err = reg.RegisterDescriptor(fd) @@ -448,6 +454,8 @@ const ( OptTrackCost EvalOption = 1 << iota // OptCheckStringFormat enables compile-time checking of string.format calls for syntax/cardinality. + // + // Deprecated: use ext.StringsValidateFormatCalls() as this option is now a no-op. OptCheckStringFormat EvalOption = 1 << iota ) diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go index 2dd72f7501..4d34305c7d 100644 --- a/vendor/github.com/google/cel-go/cel/program.go +++ b/vendor/github.com/google/cel-go/cel/program.go @@ -19,7 +19,6 @@ import ( "fmt" "sync" - celast "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" @@ -152,7 +151,7 @@ func (p *prog) clone() *prog { // ProgramOption values. // // If the program cannot be configured the prog will be nil, with a non-nil error response. -func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { +func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) { // Build the dispatcher, interpreter, and default program value. disp := interpreter.NewDispatcher() @@ -188,10 +187,13 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { // Set the attribute factory after the options have been set. var attrFactory interpreter.AttributeFactory + attrFactorOpts := []interpreter.AttrFactoryOption{ + interpreter.EnableErrorOnBadPresenceTest(p.HasFeature(featureEnableErrorOnBadPresenceTest)), + } if p.evalOpts&OptPartialEval == OptPartialEval { - attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider) + attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...) } else { - attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider) + attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...) } interp := interpreter.NewInterpreter(disp, e.Container, e.provider, e.adapter, attrFactory) p.interpreter = interp @@ -213,34 +215,6 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { if len(p.regexOptimizations) > 0 { decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...)) } - // Enable compile-time checking of syntax/cardinality for string.format calls. 
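A hedged sketch of the new EnableErrorOnBadPresenceTest option as it is wired into the attribute factory above. The behavioral claim follows the option's doc comment; the variable name and runtime payload are illustrative.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.EnableErrorOnBadPresenceTest(true),
		cel.Variable("payload", cel.DynType),
	)
	if err != nil {
		panic(err)
	}
	a, iss := env.Compile(`has(payload.name)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(a)
	if err != nil {
		panic(err)
	}
	// payload resolves to a primitive at runtime, so with the flag enabled the
	// presence test is reported as an evaluation error per the option's docs.
	_, _, err = prg.Eval(map[string]any{"payload": "not-a-struct"})
	fmt.Println(err)
}
```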
- if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat { - var isValidType func(id int64, validTypes ...ref.Type) (bool, error) - if ast.IsChecked() { - isValidType = func(id int64, validTypes ...ref.Type) (bool, error) { - t := ast.typeMap[id] - if t.Kind() == DynKind { - return true, nil - } - for _, vt := range validTypes { - k, err := typeValueToKind(vt) - if err != nil { - return false, err - } - if t.Kind() == k { - return true, nil - } - } - return false, nil - } - } else { - // if the AST isn't type-checked, short-circuit validation - isValidType = func(id int64, validTypes ...ref.Type) (bool, error) { - return true, nil - } - } - decorators = append(decorators, interpreter.InterpolateFormattedString(isValidType)) - } // Enable exhaustive eval, state tracking and cost tracking last since they require a factory. if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 { @@ -274,33 +248,16 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { decs = append(decs, interpreter.Observe(observers...)) } - return p.clone().initInterpretable(ast, decs) + return p.clone().initInterpretable(a, decs) } return newProgGen(factory) } - return p.initInterpretable(ast, decorators) + return p.initInterpretable(a, decorators) } -func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) { - // Unchecked programs do not contain type and reference information and may be slower to execute. - if !ast.IsChecked() { - interpretable, err := - p.interpreter.NewUncheckedInterpretable(ast.Expr(), decs...) - if err != nil { - return nil, err - } - p.interpretable = interpretable - return p, nil - } - - // When the AST has been checked it contains metadata that can be used to speed up program execution. - checked := &celast.CheckedAST{ - Expr: ast.Expr(), - SourceInfo: ast.SourceInfo(), - TypeMap: ast.typeMap, - ReferenceMap: ast.refMap, - } - interpretable, err := p.interpreter.NewInterpretable(checked, decs...) +func (p *prog) initInterpretable(a *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) { + // When the AST has been exprAST it contains metadata that can be used to speed up program execution. + interpretable, err := p.interpreter.NewInterpretable(a.impl, decs...) if err != nil { return nil, err } @@ -580,8 +537,6 @@ func (p *evalActivationPool) Put(value any) { } var ( - emptyEvalState = interpreter.NewEvalState() - // activationPool is an internally managed pool of Activation values that wrap map[string]any inputs activationPool = newEvalActivationPool() diff --git a/vendor/github.com/google/cel-go/cel/validator.go b/vendor/github.com/google/cel-go/cel/validator.go index 78b3113818..b50c674520 100644 --- a/vendor/github.com/google/cel-go/cel/validator.go +++ b/vendor/github.com/google/cel-go/cel/validator.go @@ -21,8 +21,6 @@ import ( "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/overloads" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) const ( @@ -69,7 +67,7 @@ type ASTValidator interface { // // See individual validators for more information on their configuration keys and configuration // properties. - Validate(*Env, ValidatorConfig, *ast.CheckedAST, *Issues) + Validate(*Env, ValidatorConfig, *ast.AST, *Issues) } // ValidatorConfig provides an accessor method for querying validator configuration state. 
@@ -180,7 +178,7 @@ func ValidateComprehensionNestingLimit(limit int) ASTValidator { return nestingLimitValidator{limit: limit} } -type argChecker func(env *Env, call, arg ast.NavigableExpr) error +type argChecker func(env *Env, call, arg ast.Expr) error func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator { return formatValidator{ @@ -203,8 +201,8 @@ func (v formatValidator) Name() string { // Validate searches the AST for uses of a given function name with a constant argument and performs a check // on whether the argument is a valid literal value. -func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) { - root := ast.NavigateCheckedAST(a) +func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) { + root := ast.NavigateAST(a) funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName)) for _, call := range funcCalls { callArgs := call.AsCall().Args() @@ -221,8 +219,8 @@ func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, } } -func evalCall(env *Env, call, arg ast.NavigableExpr) error { - ast := ParsedExprToAst(&exprpb.ParsedExpr{Expr: call.ToExpr()}) +func evalCall(env *Env, call, arg ast.Expr) error { + ast := &Ast{impl: ast.NewAST(call, ast.NewSourceInfo(nil))} prg, err := env.Program(ast) if err != nil { return err @@ -231,7 +229,7 @@ func evalCall(env *Env, call, arg ast.NavigableExpr) error { return err } -func compileRegex(_ *Env, _, arg ast.NavigableExpr) error { +func compileRegex(_ *Env, _, arg ast.Expr) error { pattern := arg.AsLiteral().Value().(string) _, err := regexp.Compile(pattern) return err @@ -244,25 +242,14 @@ func (homogeneousAggregateLiteralValidator) Name() string { return homogeneousValidatorName } -// Configure implements the ASTValidatorConfigurer interface and currently sets the list of standard -// and exempt functions from homogeneous aggregate literal checks. -// -// TODO: Move this call into the string.format() ASTValidator once ported. -func (homogeneousAggregateLiteralValidator) Configure(c MutableValidatorConfig) error { - emptyList := []string{} - exemptFunctions := c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, emptyList).([]string) - exemptFunctions = append(exemptFunctions, "format") - return c.Set(HomogeneousAggregateLiteralExemptFunctions, exemptFunctions) -} - // Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types. // // This validator makes an exception for list and map literals which occur at any level of nesting within // string format calls. 
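// Illustrative sketch (not part of the patch): the Validate methods here now receive *ast.AST
// rather than *ast.CheckedAST, but validators are still wired up through the same public
// options; ValidateComprehensionNestingLimit is defined above, and ASTValidators plus
// ValidateHomogeneousAggregateLiterals are assumed unchanged by this bump.
package main

import (
	"log"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("x", cel.IntType),
		cel.ASTValidators(
			cel.ValidateHomogeneousAggregateLiterals(),
			cel.ValidateComprehensionNestingLimit(2),
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	// The mixed-type list literal is reported by the homogeneous literal validator.
	if _, iss := env.Compile(`[1, "two", x]`); iss != nil && iss.Err() != nil {
		log.Println(iss.Err())
	}
}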
-func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.CheckedAST, iss *Issues) { +func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.AST, iss *Issues) { var exemptedFunctions []string exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string) - root := ast.NavigateCheckedAST(a) + root := ast.NavigateAST(a) listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind)) for _, listExpr := range listExprs { if inExemptFunction(listExpr, exemptedFunctions) { @@ -273,7 +260,7 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig optIndices := l.OptionalIndices() var elemType *Type for i, e := range elements { - et := e.Type() + et := a.GetType(e.ID()) if isOptionalIndex(i, optIndices) { et = et.Parameters()[0] } @@ -296,9 +283,10 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig entries := m.Entries() var keyType, valType *Type for _, e := range entries { - key, val := e.Key(), e.Value() - kt, vt := key.Type(), val.Type() - if e.IsOptional() { + mapEntry := e.AsMapEntry() + key, val := mapEntry.Key(), mapEntry.Value() + kt, vt := a.GetType(key.ID()), a.GetType(val.ID()) + if mapEntry.IsOptional() { vt = vt.Parameters()[0] } if keyType == nil && valType == nil { @@ -316,7 +304,8 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig } func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool { - if parent, found := e.Parent(); found { + parent, found := e.Parent() + for found { if parent.Kind() == ast.CallKind { fnName := parent.AsCall().FunctionName() for _, exempt := range exemptFunctions { @@ -325,9 +314,7 @@ func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool { } } } - if parent.Kind() == ast.ListKind || parent.Kind() == ast.MapKind { - return inExemptFunction(parent, exemptFunctions) - } + parent, found = parent.Parent() } return false } @@ -353,8 +340,8 @@ func (v nestingLimitValidator) Name() string { return "cel.lib.std.validate.comprehension_nesting_limit" } -func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) { - root := ast.NavigateCheckedAST(a) +func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) { + root := ast.NavigateAST(a) comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) if len(comprehensions) <= v.limit { return diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel index 0459d35239..678b412a95 100644 --- a/vendor/github.com/google/cel-go/checker/BUILD.bazel +++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel @@ -16,7 +16,6 @@ go_library( "options.go", "printer.go", "scopes.go", - "standard.go", "types.go", ], importpath = "github.com/google/cel-go/checker", @@ -60,7 +59,6 @@ go_test( "//test:go_default_library", "//test/proto2pb:go_default_library", "//test/proto3pb:go_default_library", - "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", ], ) diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go index 720e4fa968..57fb3ce5ea 100644 --- a/vendor/github.com/google/cel-go/checker/checker.go +++ b/vendor/github.com/google/cel-go/checker/checker.go @@ -18,6 +18,7 @@ package checker import ( "fmt" + 
"reflect" "github.com/google/cel-go/common" "github.com/google/cel-go/common/ast" @@ -25,139 +26,98 @@ import ( "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/types" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/types/ref" ) type checker struct { + *ast.AST + ast.ExprFactory env *Env errors *typeErrors mappings *mapping freeTypeVarCounter int - sourceInfo *exprpb.SourceInfo - types map[int64]*types.Type - references map[int64]*ast.ReferenceInfo } // Check performs type checking, giving a typed AST. -// The input is a ParsedExpr proto and an env which encapsulates -// type binding of variables, declarations of built-in functions, -// descriptions of protocol buffers, and a registry for errors. -// Returns a CheckedExpr proto, which might not be usable if -// there are errors in the error registry. -func Check(parsedExpr *exprpb.ParsedExpr, source common.Source, env *Env) (*ast.CheckedAST, *common.Errors) { +// +// The input is a parsed AST and an env which encapsulates type binding of variables, +// declarations of built-in functions, descriptions of protocol buffers, and a registry for +// errors. +// +// Returns a type-checked AST, which might not be usable if there are errors in the error +// registry. +func Check(parsed *ast.AST, source common.Source, env *Env) (*ast.AST, *common.Errors) { errs := common.NewErrors(source) + typeMap := make(map[int64]*types.Type) + refMap := make(map[int64]*ast.ReferenceInfo) c := checker{ + AST: ast.NewCheckedAST(parsed, typeMap, refMap), + ExprFactory: ast.NewExprFactory(), env: env, errors: &typeErrors{errs: errs}, mappings: newMapping(), freeTypeVarCounter: 0, - sourceInfo: parsedExpr.GetSourceInfo(), - types: make(map[int64]*types.Type), - references: make(map[int64]*ast.ReferenceInfo), } - c.check(parsedExpr.GetExpr()) + c.check(c.Expr()) - // Walk over the final type map substituting any type parameters either by their bound value or - // by DYN. - m := make(map[int64]*types.Type) - for id, t := range c.types { - m[id] = substitute(c.mappings, t, true) + // Walk over the final type map substituting any type parameters either by their bound value + // or by DYN. 
+ for id, t := range c.TypeMap() { + c.SetType(id, substitute(c.mappings, t, true)) } - - return &ast.CheckedAST{ - Expr: parsedExpr.GetExpr(), - SourceInfo: parsedExpr.GetSourceInfo(), - TypeMap: m, - ReferenceMap: c.references, - }, errs + return c.AST, errs } -func (c *checker) check(e *exprpb.Expr) { +func (c *checker) check(e ast.Expr) { if e == nil { return } - switch e.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - literal := e.GetConstExpr() - switch literal.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - c.checkBoolLiteral(e) - case *exprpb.Constant_BytesValue: - c.checkBytesLiteral(e) - case *exprpb.Constant_DoubleValue: - c.checkDoubleLiteral(e) - case *exprpb.Constant_Int64Value: - c.checkInt64Literal(e) - case *exprpb.Constant_NullValue: - c.checkNullLiteral(e) - case *exprpb.Constant_StringValue: - c.checkStringLiteral(e) - case *exprpb.Constant_Uint64Value: - c.checkUint64Literal(e) + switch e.Kind() { + case ast.LiteralKind: + literal := ref.Val(e.AsLiteral()) + switch literal.Type() { + case types.BoolType, types.BytesType, types.DoubleType, types.IntType, + types.NullType, types.StringType, types.UintType: + c.setType(e, literal.Type().(*types.Type)) + default: + c.errors.unexpectedASTType(e.ID(), c.location(e), "literal", literal.Type().TypeName()) } - case *exprpb.Expr_IdentExpr: + case ast.IdentKind: c.checkIdent(e) - case *exprpb.Expr_SelectExpr: + case ast.SelectKind: c.checkSelect(e) - case *exprpb.Expr_CallExpr: + case ast.CallKind: c.checkCall(e) - case *exprpb.Expr_ListExpr: + case ast.ListKind: c.checkCreateList(e) - case *exprpb.Expr_StructExpr: + case ast.MapKind: + c.checkCreateMap(e) + case ast.StructKind: c.checkCreateStruct(e) - case *exprpb.Expr_ComprehensionExpr: + case ast.ComprehensionKind: c.checkComprehension(e) default: - c.errors.unexpectedASTType(e.GetId(), c.location(e), e) + c.errors.unexpectedASTType(e.ID(), c.location(e), "unspecified", reflect.TypeOf(e).Name()) } } -func (c *checker) checkInt64Literal(e *exprpb.Expr) { - c.setType(e, types.IntType) -} - -func (c *checker) checkUint64Literal(e *exprpb.Expr) { - c.setType(e, types.UintType) -} - -func (c *checker) checkStringLiteral(e *exprpb.Expr) { - c.setType(e, types.StringType) -} - -func (c *checker) checkBytesLiteral(e *exprpb.Expr) { - c.setType(e, types.BytesType) -} - -func (c *checker) checkDoubleLiteral(e *exprpb.Expr) { - c.setType(e, types.DoubleType) -} - -func (c *checker) checkBoolLiteral(e *exprpb.Expr) { - c.setType(e, types.BoolType) -} - -func (c *checker) checkNullLiteral(e *exprpb.Expr) { - c.setType(e, types.NullType) -} - -func (c *checker) checkIdent(e *exprpb.Expr) { - identExpr := e.GetIdentExpr() +func (c *checker) checkIdent(e ast.Expr) { + identName := e.AsIdent() // Check to see if the identifier is declared. - if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil { + if ident := c.env.LookupIdent(identName); ident != nil { c.setType(e, ident.Type()) c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) // Overwrite the identifier with its fully qualified name. 
- identExpr.Name = ident.Name() + e.SetKindCase(c.NewIdent(e.ID(), ident.Name())) return } c.setType(e, types.ErrorType) - c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), identExpr.GetName()) + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), identName) } -func (c *checker) checkSelect(e *exprpb.Expr) { - sel := e.GetSelectExpr() +func (c *checker) checkSelect(e ast.Expr) { + sel := e.AsSelect() // Before traversing down the tree, try to interpret as qualified name. qname, found := containers.ToQualifiedName(e) if found { @@ -170,31 +130,26 @@ func (c *checker) checkSelect(e *exprpb.Expr) { // variable name. c.setType(e, ident.Type()) c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) - identName := ident.Name() - e.ExprKind = &exprpb.Expr_IdentExpr{ - IdentExpr: &exprpb.Expr_Ident{ - Name: identName, - }, - } + e.SetKindCase(c.NewIdent(e.ID(), ident.Name())) return } } - resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false) - if sel.TestOnly { + resultType := c.checkSelectField(e, sel.Operand(), sel.FieldName(), false) + if sel.IsTestOnly() { resultType = types.BoolType } c.setType(e, substitute(c.mappings, resultType, false)) } -func (c *checker) checkOptSelect(e *exprpb.Expr) { +func (c *checker) checkOptSelect(e ast.Expr) { // Collect metadata related to the opt select call packaged by the parser. - call := e.GetCallExpr() - operand := call.GetArgs()[0] - field := call.GetArgs()[1] + call := e.AsCall() + operand := call.Args()[0] + field := call.Args()[1] fieldName, isString := maybeUnwrapString(field) if !isString { - c.errors.notAnOptionalFieldSelection(field.GetId(), c.location(field), field) + c.errors.notAnOptionalFieldSelection(field.ID(), c.location(field), field) return } @@ -204,7 +159,7 @@ func (c *checker) checkOptSelect(e *exprpb.Expr) { c.setReference(e, ast.NewFunctionReference("select_optional_field")) } -func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *types.Type { +func (c *checker) checkSelectField(e, operand ast.Expr, field string, optional bool) *types.Type { // Interpret as field selection, first traversing down the operand. c.check(operand) operandType := substitute(c.mappings, c.getType(operand), false) @@ -222,7 +177,7 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option // Objects yield their field type declaration as the selection result type, but only if // the field is defined. messageType := targetType - if fieldType, found := c.lookupFieldType(e.GetId(), messageType.TypeName(), field); found { + if fieldType, found := c.lookupFieldType(e.ID(), messageType.TypeName(), field); found { resultType = fieldType } case types.TypeParamKind: @@ -236,7 +191,7 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option // Dynamic / error values are treated as DYN type. Errors are handled this way as well // in order to allow forward progress on the check. if !isDynOrError(targetType) { - c.errors.typeDoesNotSupportFieldSelection(e.GetId(), c.location(e), targetType) + c.errors.typeDoesNotSupportFieldSelection(e.ID(), c.location(e), targetType) } resultType = types.DynType } @@ -248,35 +203,34 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option return resultType } -func (c *checker) checkCall(e *exprpb.Expr) { +func (c *checker) checkCall(e ast.Expr) { // Note: similar logic exists within the `interpreter/planner.go`. 
If making changes here // please consider the impact on planner.go and consolidate implementations or mirror code // as appropriate. - call := e.GetCallExpr() - fnName := call.GetFunction() + call := e.AsCall() + fnName := call.FunctionName() if fnName == operators.OptSelect { c.checkOptSelect(e) return } - args := call.GetArgs() + args := call.Args() // Traverse arguments. for _, arg := range args { c.check(arg) } - target := call.GetTarget() // Regular static call with simple name. - if target == nil { + if !call.IsMemberFunction() { // Check for the existence of the function. fn := c.env.LookupFunction(fnName) if fn == nil { - c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName) + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName) c.setType(e, types.ErrorType) return } // Overwrite the function name with its fully qualified resolved name. - call.Function = fn.Name() + e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...)) // Check to see whether the overload resolves. c.resolveOverloadOrError(e, fn, nil, args) return @@ -287,6 +241,7 @@ func (c *checker) checkCall(e *exprpb.Expr) { // target a.b. // // Check whether the target is a namespaced function name. + target := call.Target() qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target) if maybeQualified { maybeQualifiedName := qualifiedPrefix + "." + fnName @@ -295,15 +250,14 @@ func (c *checker) checkCall(e *exprpb.Expr) { // The function name is namespaced and so preserving the target operand would // be an inaccurate representation of the desired evaluation behavior. // Overwrite with fully-qualified resolved function name sans receiver target. - call.Target = nil - call.Function = fn.Name() + e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...)) c.resolveOverloadOrError(e, fn, nil, args) return } } // Regular instance call. - c.check(call.Target) + c.check(target) fn := c.env.LookupFunction(fnName) // Function found, attempt overload resolution. if fn != nil { @@ -312,11 +266,11 @@ func (c *checker) checkCall(e *exprpb.Expr) { } // Function name not declared, record error. c.setType(e, types.ErrorType) - c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName) + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName) } func (c *checker) resolveOverloadOrError( - e *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) { + e ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) { // Attempt to resolve the overload. resolution := c.resolveOverload(e, fn, target, args) // No such overload, error noted in the resolveOverload call, type recorded here. 
@@ -330,7 +284,7 @@ func (c *checker) resolveOverloadOrError( } func (c *checker) resolveOverload( - call *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution { + call ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) *overloadResolution { var argTypes []*types.Type if target != nil { @@ -362,8 +316,8 @@ func (c *checker) resolveOverload( for i, argType := range argTypes { if !c.isAssignable(argType, types.BoolType) { c.errors.typeMismatch( - args[i].GetId(), - c.locationByID(args[i].GetId()), + args[i].ID(), + c.locationByID(args[i].ID()), types.BoolType, argType) resultType = types.ErrorType @@ -408,29 +362,29 @@ func (c *checker) resolveOverload( for i, argType := range argTypes { argTypes[i] = substitute(c.mappings, argType, true) } - c.errors.noMatchingOverload(call.GetId(), c.location(call), fn.Name(), argTypes, target != nil) + c.errors.noMatchingOverload(call.ID(), c.location(call), fn.Name(), argTypes, target != nil) return nil } return newResolution(checkedRef, resultType) } -func (c *checker) checkCreateList(e *exprpb.Expr) { - create := e.GetListExpr() +func (c *checker) checkCreateList(e ast.Expr) { + create := e.AsList() var elemsType *types.Type - optionalIndices := create.GetOptionalIndices() + optionalIndices := create.OptionalIndices() optionals := make(map[int32]bool, len(optionalIndices)) for _, optInd := range optionalIndices { optionals[optInd] = true } - for i, e := range create.GetElements() { + for i, e := range create.Elements() { c.check(e) elemType := c.getType(e) if optionals[int32(i)] { var isOptional bool elemType, isOptional = maybeUnwrapOptional(elemType) if !isOptional && !isDyn(elemType) { - c.errors.typeMismatch(e.GetId(), c.location(e), types.NewOptionalType(elemType), elemType) + c.errors.typeMismatch(e.ID(), c.location(e), types.NewOptionalType(elemType), elemType) } } elemsType = c.joinTypes(e, elemsType, elemType) @@ -442,32 +396,24 @@ func (c *checker) checkCreateList(e *exprpb.Expr) { c.setType(e, types.NewListType(elemsType)) } -func (c *checker) checkCreateStruct(e *exprpb.Expr) { - str := e.GetStructExpr() - if str.GetMessageName() != "" { - c.checkCreateMessage(e) - } else { - c.checkCreateMap(e) - } -} - -func (c *checker) checkCreateMap(e *exprpb.Expr) { - mapVal := e.GetStructExpr() +func (c *checker) checkCreateMap(e ast.Expr) { + mapVal := e.AsMap() var mapKeyType *types.Type var mapValueType *types.Type - for _, ent := range mapVal.GetEntries() { - key := ent.GetMapKey() + for _, e := range mapVal.Entries() { + entry := e.AsMapEntry() + key := entry.Key() c.check(key) mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key)) - val := ent.GetValue() + val := entry.Value() c.check(val) valType := c.getType(val) - if ent.GetOptionalEntry() { + if entry.IsOptional() { var isOptional bool valType, isOptional = maybeUnwrapOptional(valType) if !isOptional && !isDyn(valType) { - c.errors.typeMismatch(val.GetId(), c.location(val), types.NewOptionalType(valType), valType) + c.errors.typeMismatch(val.ID(), c.location(val), types.NewOptionalType(valType), valType) } } mapValueType = c.joinTypes(val, mapValueType, valType) @@ -480,25 +426,28 @@ func (c *checker) checkCreateMap(e *exprpb.Expr) { c.setType(e, types.NewMapType(mapKeyType, mapValueType)) } -func (c *checker) checkCreateMessage(e *exprpb.Expr) { - msgVal := e.GetStructExpr() +func (c *checker) checkCreateStruct(e ast.Expr) { + msgVal := e.AsStruct() // Determine the type of the message. 
resultType := types.ErrorType - ident := c.env.LookupIdent(msgVal.GetMessageName()) + ident := c.env.LookupIdent(msgVal.TypeName()) if ident == nil { c.errors.undeclaredReference( - e.GetId(), c.location(e), c.env.container.Name(), msgVal.GetMessageName()) + e.ID(), c.location(e), c.env.container.Name(), msgVal.TypeName()) c.setType(e, types.ErrorType) return } // Ensure the type name is fully qualified in the AST. typeName := ident.Name() - msgVal.MessageName = typeName - c.setReference(e, ast.NewIdentReference(ident.Name(), nil)) + if msgVal.TypeName() != typeName { + e.SetKindCase(c.NewStruct(e.ID(), typeName, msgVal.Fields())) + msgVal = e.AsStruct() + } + c.setReference(e, ast.NewIdentReference(typeName, nil)) identKind := ident.Type().Kind() if identKind != types.ErrorKind { if identKind != types.TypeKind { - c.errors.notAType(e.GetId(), c.location(e), ident.Type().DeclaredTypeName()) + c.errors.notAType(e.ID(), c.location(e), ident.Type().DeclaredTypeName()) } else { resultType = ident.Type().Parameters()[0] // Backwards compatibility test between well-known types and message types @@ -509,7 +458,7 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) { } else if resultType.Kind() == types.StructKind { typeName = resultType.DeclaredTypeName() } else { - c.errors.notAMessageType(e.GetId(), c.location(e), resultType.DeclaredTypeName()) + c.errors.notAMessageType(e.ID(), c.location(e), resultType.DeclaredTypeName()) resultType = types.ErrorType } } @@ -517,37 +466,38 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) { c.setType(e, resultType) // Check the field initializers. - for _, ent := range msgVal.GetEntries() { - field := ent.GetFieldKey() - value := ent.GetValue() + for _, f := range msgVal.Fields() { + field := f.AsStructField() + fieldName := field.Name() + value := field.Value() c.check(value) fieldType := types.ErrorType - ft, found := c.lookupFieldType(ent.GetId(), typeName, field) + ft, found := c.lookupFieldType(f.ID(), typeName, fieldName) if found { fieldType = ft } valType := c.getType(value) - if ent.GetOptionalEntry() { + if field.IsOptional() { var isOptional bool valType, isOptional = maybeUnwrapOptional(valType) if !isOptional && !isDyn(valType) { - c.errors.typeMismatch(value.GetId(), c.location(value), types.NewOptionalType(valType), valType) + c.errors.typeMismatch(value.ID(), c.location(value), types.NewOptionalType(valType), valType) } } if !c.isAssignable(fieldType, valType) { - c.errors.fieldTypeMismatch(ent.GetId(), c.locationByID(ent.GetId()), field, fieldType, valType) + c.errors.fieldTypeMismatch(f.ID(), c.locationByID(f.ID()), fieldName, fieldType, valType) } } } -func (c *checker) checkComprehension(e *exprpb.Expr) { - comp := e.GetComprehensionExpr() - c.check(comp.GetIterRange()) - c.check(comp.GetAccuInit()) - accuType := c.getType(comp.GetAccuInit()) - rangeType := substitute(c.mappings, c.getType(comp.GetIterRange()), false) +func (c *checker) checkComprehension(e ast.Expr) { + comp := e.AsComprehension() + c.check(comp.IterRange()) + c.check(comp.AccuInit()) + accuType := c.getType(comp.AccuInit()) + rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false) var varType *types.Type switch rangeType.Kind() { @@ -564,32 +514,32 @@ func (c *checker) checkComprehension(e *exprpb.Expr) { // Set the range iteration variable to type DYN as well. 
varType = types.DynType default: - c.errors.notAComprehensionRange(comp.GetIterRange().GetId(), c.location(comp.GetIterRange()), rangeType) + c.errors.notAComprehensionRange(comp.IterRange().ID(), c.location(comp.IterRange()), rangeType) varType = types.ErrorType } // Create a scope for the comprehension since it has a local accumulation variable. // This scope will contain the accumulation variable used to compute the result. c.env = c.env.enterScope() - c.env.AddIdents(decls.NewVariable(comp.GetAccuVar(), accuType)) + c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType)) // Create a block scope for the loop. c.env = c.env.enterScope() - c.env.AddIdents(decls.NewVariable(comp.GetIterVar(), varType)) + c.env.AddIdents(decls.NewVariable(comp.IterVar(), varType)) // Check the variable references in the condition and step. - c.check(comp.GetLoopCondition()) - c.assertType(comp.GetLoopCondition(), types.BoolType) - c.check(comp.GetLoopStep()) - c.assertType(comp.GetLoopStep(), accuType) + c.check(comp.LoopCondition()) + c.assertType(comp.LoopCondition(), types.BoolType) + c.check(comp.LoopStep()) + c.assertType(comp.LoopStep(), accuType) // Exit the loop's block scope before checking the result. c.env = c.env.exitScope() - c.check(comp.GetResult()) + c.check(comp.Result()) // Exit the comprehension scope. c.env = c.env.exitScope() - c.setType(e, substitute(c.mappings, c.getType(comp.GetResult()), false)) + c.setType(e, substitute(c.mappings, c.getType(comp.Result()), false)) } // Checks compatibility of joined types, and returns the most general common type. -func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *types.Type { +func (c *checker) joinTypes(e ast.Expr, previous, current *types.Type) *types.Type { if previous == nil { return current } @@ -599,7 +549,7 @@ func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *type if c.dynAggregateLiteralElementTypesEnabled() { return types.DynType } - c.errors.typeMismatch(e.GetId(), c.location(e), previous, current) + c.errors.typeMismatch(e.ID(), c.location(e), previous, current) return types.ErrorType } @@ -633,41 +583,41 @@ func (c *checker) isAssignableList(l1, l2 []*types.Type) bool { return false } -func maybeUnwrapString(e *exprpb.Expr) (string, bool) { - switch e.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - literal := e.GetConstExpr() - switch literal.GetConstantKind().(type) { - case *exprpb.Constant_StringValue: - return literal.GetStringValue(), true +func maybeUnwrapString(e ast.Expr) (string, bool) { + switch e.Kind() { + case ast.LiteralKind: + literal := e.AsLiteral() + switch v := literal.(type) { + case types.String: + return string(v), true } } return "", false } -func (c *checker) setType(e *exprpb.Expr, t *types.Type) { - if old, found := c.types[e.GetId()]; found && !old.IsExactType(t) { - c.errors.incompatibleType(e.GetId(), c.location(e), e, old, t) +func (c *checker) setType(e ast.Expr, t *types.Type) { + if old, found := c.TypeMap()[e.ID()]; found && !old.IsExactType(t) { + c.errors.incompatibleType(e.ID(), c.location(e), e, old, t) return } - c.types[e.GetId()] = t + c.SetType(e.ID(), t) } -func (c *checker) getType(e *exprpb.Expr) *types.Type { - return c.types[e.GetId()] +func (c *checker) getType(e ast.Expr) *types.Type { + return c.TypeMap()[e.ID()] } -func (c *checker) setReference(e *exprpb.Expr, r *ast.ReferenceInfo) { - if old, found := c.references[e.GetId()]; found && !old.Equals(r) { - c.errors.referenceRedefinition(e.GetId(), c.location(e), e, old, 
r) +func (c *checker) setReference(e ast.Expr, r *ast.ReferenceInfo) { + if old, found := c.ReferenceMap()[e.ID()]; found && !old.Equals(r) { + c.errors.referenceRedefinition(e.ID(), c.location(e), e, old, r) return } - c.references[e.GetId()] = r + c.SetReference(e.ID(), r) } -func (c *checker) assertType(e *exprpb.Expr, t *types.Type) { +func (c *checker) assertType(e ast.Expr, t *types.Type) { if !c.isAssignable(t, c.getType(e)) { - c.errors.typeMismatch(e.GetId(), c.location(e), t, c.getType(e)) + c.errors.typeMismatch(e.ID(), c.location(e), t, c.getType(e)) } } @@ -683,26 +633,12 @@ func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution { } } -func (c *checker) location(e *exprpb.Expr) common.Location { - return c.locationByID(e.GetId()) +func (c *checker) location(e ast.Expr) common.Location { + return c.locationByID(e.ID()) } func (c *checker) locationByID(id int64) common.Location { - positions := c.sourceInfo.GetPositions() - var line = 1 - if offset, found := positions[id]; found { - col := int(offset) - for _, lineOffset := range c.sourceInfo.GetLineOffsets() { - if lineOffset < offset { - line++ - col = int(offset - lineOffset) - } else { - break - } - } - return common.NewLocation(line, col) - } - return common.NoLocation + return c.SourceInfo().GetStartLocation(id) } func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) { diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go index fd3f735051..04244694d8 100644 --- a/vendor/github.com/google/cel-go/checker/cost.go +++ b/vendor/github.com/google/cel-go/checker/cost.go @@ -22,8 +22,6 @@ import ( "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types" "github.com/google/cel-go/parser" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // WARNING: Any changes to cost calculations in this file require a corresponding change in interpreter/runtimecost.go @@ -58,7 +56,7 @@ type AstNode interface { // Type returns the deduced type of the AstNode. Type() *types.Type // Expr returns the expression of the AstNode. - Expr() *exprpb.Expr + Expr() ast.Expr // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression. // For constants and inline list and map declarations, the exact size is returned. For concatenated list, strings // and bytes, the size is derived from the size estimates of the operands. 
nil is returned if there is no @@ -69,7 +67,7 @@ type AstNode interface { type astNode struct { path []string t *types.Type - expr *exprpb.Expr + expr ast.Expr derivedSize *SizeEstimate } @@ -81,7 +79,7 @@ func (e astNode) Type() *types.Type { return e.t } -func (e astNode) Expr() *exprpb.Expr { +func (e astNode) Expr() ast.Expr { return e.expr } @@ -90,29 +88,27 @@ func (e astNode) ComputedSize() *SizeEstimate { return e.derivedSize } var v uint64 - switch ek := e.expr.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - switch ck := ek.ConstExpr.GetConstantKind().(type) { - case *exprpb.Constant_StringValue: + switch e.expr.Kind() { + case ast.LiteralKind: + switch ck := e.expr.AsLiteral().(type) { + case types.String: // converting to runes here is an O(n) operation, but // this is consistent with how size is computed at runtime, // and how the language definition defines string size - v = uint64(len([]rune(ck.StringValue))) - case *exprpb.Constant_BytesValue: - v = uint64(len(ck.BytesValue)) - case *exprpb.Constant_BoolValue, *exprpb.Constant_DoubleValue, *exprpb.Constant_DurationValue, - *exprpb.Constant_Int64Value, *exprpb.Constant_TimestampValue, *exprpb.Constant_Uint64Value, - *exprpb.Constant_NullValue: + v = uint64(len([]rune(ck))) + case types.Bytes: + v = uint64(len(ck)) + case types.Bool, types.Double, types.Duration, + types.Int, types.Timestamp, types.Uint, + types.Null: v = uint64(1) default: return nil } - case *exprpb.Expr_ListExpr: - v = uint64(len(ek.ListExpr.GetElements())) - case *exprpb.Expr_StructExpr: - if ek.StructExpr.GetMessageName() == "" { - v = uint64(len(ek.StructExpr.GetEntries())) - } + case ast.ListKind: + v = uint64(e.expr.AsList().Size()) + case ast.MapKind: + v = uint64(e.expr.AsMap().Size()) default: return nil } @@ -265,7 +261,7 @@ type coster struct { iterRanges iterRangeScopes // computedSizes tracks the computed sizes of call results. computedSizes map[int64]SizeEstimate - checkedAST *ast.CheckedAST + checkedAST *ast.AST estimator CostEstimator overloadEstimators map[string]FunctionEstimator // presenceTestCost will either be a zero or one based on whether has() macros count against cost computations. @@ -275,8 +271,8 @@ type coster struct { // Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names. type iterRangeScopes map[string][]int64 -func (vs iterRangeScopes) push(varName string, expr *exprpb.Expr) { - vs[varName] = append(vs[varName], expr.GetId()) +func (vs iterRangeScopes) push(varName string, expr ast.Expr) { + vs[varName] = append(vs[varName], expr.ID()) } func (vs iterRangeScopes) pop(varName string) { @@ -324,9 +320,9 @@ func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) C } // Cost estimates the cost of the parsed and type checked CEL expression. 
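// Illustrative sketch (not part of the patch): Cost now takes *ast.AST, but callers typically go
// through cel.Env.EstimateCost. Returning nil from both estimator methods falls back to the
// built-in heuristics; this assumes the two-method CostEstimator interface is unchanged.
package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/checker"
)

type deferToDefaults struct{}

func (deferToDefaults) EstimateSize(element checker.AstNode) *checker.SizeEstimate { return nil }

func (deferToDefaults) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
	return nil
}

func main() {
	env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
	if err != nil {
		log.Fatal(err)
	}
	ast, iss := env.Compile(`name.startsWith("k8s-")`)
	if iss != nil && iss.Err() != nil {
		log.Fatal(iss.Err())
	}
	est, err := env.EstimateCost(ast, deferToDefaults{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("estimated cost range: [%d, %d]\n", est.Min, est.Max)
}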
-func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) { +func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) { c := &coster{ - checkedAST: checker, + checkedAST: checked, estimator: estimator, overloadEstimators: map[string]FunctionEstimator{}, exprPath: map[int64][]string{}, @@ -340,28 +336,30 @@ func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) return CostEstimate{}, err } } - return c.cost(checker.Expr), nil + return c.cost(checked.Expr()), nil } -func (c *coster) cost(e *exprpb.Expr) CostEstimate { +func (c *coster) cost(e ast.Expr) CostEstimate { if e == nil { return CostEstimate{} } var cost CostEstimate - switch e.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: + switch e.Kind() { + case ast.LiteralKind: cost = constCost - case *exprpb.Expr_IdentExpr: + case ast.IdentKind: cost = c.costIdent(e) - case *exprpb.Expr_SelectExpr: + case ast.SelectKind: cost = c.costSelect(e) - case *exprpb.Expr_CallExpr: + case ast.CallKind: cost = c.costCall(e) - case *exprpb.Expr_ListExpr: + case ast.ListKind: cost = c.costCreateList(e) - case *exprpb.Expr_StructExpr: + case ast.MapKind: + cost = c.costCreateMap(e) + case ast.StructKind: cost = c.costCreateStruct(e) - case *exprpb.Expr_ComprehensionExpr: + case ast.ComprehensionKind: cost = c.costComprehension(e) default: return CostEstimate{} @@ -369,53 +367,51 @@ func (c *coster) cost(e *exprpb.Expr) CostEstimate { return cost } -func (c *coster) costIdent(e *exprpb.Expr) CostEstimate { - identExpr := e.GetIdentExpr() - +func (c *coster) costIdent(e ast.Expr) CostEstimate { + identName := e.AsIdent() // build and track the field path - if iterRange, ok := c.iterRanges.peek(identExpr.GetName()); ok { - switch c.checkedAST.TypeMap[iterRange].Kind() { + if iterRange, ok := c.iterRanges.peek(identName); ok { + switch c.checkedAST.GetType(iterRange).Kind() { case types.ListKind: c.addPath(e, append(c.exprPath[iterRange], "@items")) case types.MapKind: c.addPath(e, append(c.exprPath[iterRange], "@keys")) } } else { - c.addPath(e, []string{identExpr.GetName()}) + c.addPath(e, []string{identName}) } return selectAndIdentCost } -func (c *coster) costSelect(e *exprpb.Expr) CostEstimate { - sel := e.GetSelectExpr() +func (c *coster) costSelect(e ast.Expr) CostEstimate { + sel := e.AsSelect() var sum CostEstimate - if sel.GetTestOnly() { + if sel.IsTestOnly() { // recurse, but do not add any cost // this is equivalent to how evalTestOnly increments the runtime cost counter // but does not add any additional cost for the qualifier, except here we do // the reverse (ident adds cost) sum = sum.Add(c.presenceTestCost) - sum = sum.Add(c.cost(sel.GetOperand())) + sum = sum.Add(c.cost(sel.Operand())) return sum } - sum = sum.Add(c.cost(sel.GetOperand())) - targetType := c.getType(sel.GetOperand()) + sum = sum.Add(c.cost(sel.Operand())) + targetType := c.getType(sel.Operand()) switch targetType.Kind() { case types.MapKind, types.StructKind, types.TypeParamKind: sum = sum.Add(selectAndIdentCost) } // build and track the field path - c.addPath(e, append(c.getPath(sel.GetOperand()), sel.GetField())) + c.addPath(e, append(c.getPath(sel.Operand()), sel.FieldName())) return sum } -func (c *coster) costCall(e *exprpb.Expr) CostEstimate { - call := e.GetCallExpr() - target := call.GetTarget() - args := call.GetArgs() +func (c *coster) costCall(e ast.Expr) CostEstimate { + call := e.AsCall() + args := call.Args() var sum CostEstimate @@ -426,22 +422,20 
@@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate { argTypes[i] = c.newAstNode(arg) } - ref := c.checkedAST.ReferenceMap[e.GetId()] - if ref == nil || len(ref.OverloadIDs) == 0 { + overloadIDs := c.checkedAST.GetOverloadIDs(e.ID()) + if len(overloadIDs) == 0 { return CostEstimate{} } var targetType AstNode - if target != nil { - if call.Target != nil { - sum = sum.Add(c.cost(call.GetTarget())) - targetType = c.newAstNode(call.GetTarget()) - } + if call.IsMemberFunction() { + sum = sum.Add(c.cost(call.Target())) + targetType = c.newAstNode(call.Target()) } // Pick a cost estimate range that covers all the overload cost estimation ranges fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0} var resultSize *SizeEstimate - for _, overload := range ref.OverloadIDs { - overloadCost := c.functionCost(call.GetFunction(), overload, &targetType, argTypes, argCosts) + for _, overload := range overloadIDs { + overloadCost := c.functionCost(call.FunctionName(), overload, &targetType, argTypes, argCosts) fnCost = fnCost.Union(overloadCost.CostEstimate) if overloadCost.ResultSize != nil { if resultSize == nil { @@ -464,64 +458,56 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate { } } if resultSize != nil { - c.computedSizes[e.GetId()] = *resultSize + c.computedSizes[e.ID()] = *resultSize } return sum.Add(fnCost) } -func (c *coster) costCreateList(e *exprpb.Expr) CostEstimate { - create := e.GetListExpr() +func (c *coster) costCreateList(e ast.Expr) CostEstimate { + create := e.AsList() var sum CostEstimate - for _, e := range create.GetElements() { + for _, e := range create.Elements() { sum = sum.Add(c.cost(e)) } return sum.Add(createListBaseCost) } -func (c *coster) costCreateStruct(e *exprpb.Expr) CostEstimate { - str := e.GetStructExpr() - if str.MessageName != "" { - return c.costCreateMessage(e) - } - return c.costCreateMap(e) -} - -func (c *coster) costCreateMap(e *exprpb.Expr) CostEstimate { - mapVal := e.GetStructExpr() +func (c *coster) costCreateMap(e ast.Expr) CostEstimate { + mapVal := e.AsMap() var sum CostEstimate - for _, ent := range mapVal.GetEntries() { - key := ent.GetMapKey() - sum = sum.Add(c.cost(key)) - - sum = sum.Add(c.cost(ent.GetValue())) + for _, ent := range mapVal.Entries() { + entry := ent.AsMapEntry() + sum = sum.Add(c.cost(entry.Key())) + sum = sum.Add(c.cost(entry.Value())) } return sum.Add(createMapBaseCost) } -func (c *coster) costCreateMessage(e *exprpb.Expr) CostEstimate { - msgVal := e.GetStructExpr() +func (c *coster) costCreateStruct(e ast.Expr) CostEstimate { + msgVal := e.AsStruct() var sum CostEstimate - for _, ent := range msgVal.GetEntries() { - sum = sum.Add(c.cost(ent.GetValue())) + for _, ent := range msgVal.Fields() { + field := ent.AsStructField() + sum = sum.Add(c.cost(field.Value())) } return sum.Add(createMessageBaseCost) } -func (c *coster) costComprehension(e *exprpb.Expr) CostEstimate { - comp := e.GetComprehensionExpr() +func (c *coster) costComprehension(e ast.Expr) CostEstimate { + comp := e.AsComprehension() var sum CostEstimate - sum = sum.Add(c.cost(comp.GetIterRange())) - sum = sum.Add(c.cost(comp.GetAccuInit())) + sum = sum.Add(c.cost(comp.IterRange())) + sum = sum.Add(c.cost(comp.AccuInit())) // Track the iterRange of each IterVar for field path construction - c.iterRanges.push(comp.GetIterVar(), comp.GetIterRange()) - loopCost := c.cost(comp.GetLoopCondition()) - stepCost := c.cost(comp.GetLoopStep()) - c.iterRanges.pop(comp.GetIterVar()) - sum = sum.Add(c.cost(comp.Result)) - rangeCnt := 
c.sizeEstimate(c.newAstNode(comp.GetIterRange())) + c.iterRanges.push(comp.IterVar(), comp.IterRange()) + loopCost := c.cost(comp.LoopCondition()) + stepCost := c.cost(comp.LoopStep()) + c.iterRanges.pop(comp.IterVar()) + sum = sum.Add(c.cost(comp.Result())) + rangeCnt := c.sizeEstimate(c.newAstNode(comp.IterRange())) - c.computedSizes[e.GetId()] = rangeCnt + c.computedSizes[e.ID()] = rangeCnt rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost)) sum = sum.Add(rangeCost) @@ -674,26 +660,26 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())} } -func (c *coster) getType(e *exprpb.Expr) *types.Type { - return c.checkedAST.TypeMap[e.GetId()] +func (c *coster) getType(e ast.Expr) *types.Type { + return c.checkedAST.GetType(e.ID()) } -func (c *coster) getPath(e *exprpb.Expr) []string { - return c.exprPath[e.GetId()] +func (c *coster) getPath(e ast.Expr) []string { + return c.exprPath[e.ID()] } -func (c *coster) addPath(e *exprpb.Expr, path []string) { - c.exprPath[e.GetId()] = path +func (c *coster) addPath(e ast.Expr, path []string) { + c.exprPath[e.ID()] = path } -func (c *coster) newAstNode(e *exprpb.Expr) *astNode { +func (c *coster) newAstNode(e ast.Expr) *astNode { path := c.getPath(e) if len(path) > 0 && path[0] == parser.AccumulatorName { // only provide paths to root vars; omit accumulator vars path = nil } var derivedSize *SizeEstimate - if size, ok := c.computedSizes[e.GetId()]; ok { + if size, ok := c.computedSizes[e.ID()]; ok { derivedSize = &size } return &astNode{ diff --git a/vendor/github.com/google/cel-go/checker/decls/decls.go b/vendor/github.com/google/cel-go/checker/decls/decls.go index 0d91bef514..c0e5de469f 100644 --- a/vendor/github.com/google/cel-go/checker/decls/decls.go +++ b/vendor/github.com/google/cel-go/checker/decls/decls.go @@ -67,7 +67,7 @@ func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type { // NewOptionalType constructs an abstract type indicating that the parameterized type // may be contained within the object. func NewOptionalType(paramType *exprpb.Type) *exprpb.Type { - return NewAbstractType("optional", paramType) + return NewAbstractType("optional_type", paramType) } // NewFunctionType creates a function invocation contract, typically only used diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go index 70682b17c6..d5ac05014e 100644 --- a/vendor/github.com/google/cel-go/checker/env.go +++ b/vendor/github.com/google/cel-go/checker/env.go @@ -146,6 +146,14 @@ func (e *Env) LookupIdent(name string) *decls.VariableDecl { return decl } + if i, found := e.provider.FindIdent(candidate); found { + if t, ok := i.(*types.Type); ok { + decl := decls.NewVariable(candidate, types.NewTypeTypeWithParam(t)) + e.declarations.AddIdent(decl) + return decl + } + } + // Next try to import this as an enum value by splitting the name in a type prefix and // the enum inside. 
if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType { diff --git a/vendor/github.com/google/cel-go/checker/errors.go b/vendor/github.com/google/cel-go/checker/errors.go index c2b96498d1..8b3bf0b8b6 100644 --- a/vendor/github.com/google/cel-go/checker/errors.go +++ b/vendor/github.com/google/cel-go/checker/errors.go @@ -15,13 +15,9 @@ package checker import ( - "reflect" - "github.com/google/cel-go/common" "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // typeErrors is a specialization of Errors. @@ -34,9 +30,9 @@ func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string, name, FormatCELType(field), FormatCELType(value)) } -func (e *typeErrors) incompatibleType(id int64, l common.Location, ex *exprpb.Expr, prev, next *types.Type) { +func (e *typeErrors) incompatibleType(id int64, l common.Location, ex ast.Expr, prev, next *types.Type) { e.errs.ReportErrorAtID(id, l, - "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next) + "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next) } func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) { @@ -49,7 +45,7 @@ func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *type FormatCELType(t)) } -func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field *exprpb.Expr) { +func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field ast.Expr) { e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field) } @@ -61,9 +57,9 @@ func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName strin e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName) } -func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex *exprpb.Expr, prev, next *ast.ReferenceInfo) { +func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex ast.Expr, prev, next *ast.ReferenceInfo) { e.errs.ReportErrorAtID(id, l, - "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next) + "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next) } func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) { @@ -87,6 +83,6 @@ func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typ e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName) } -func (e *typeErrors) unexpectedASTType(id int64, l common.Location, ex *exprpb.Expr) { - e.errs.ReportErrorAtID(id, l, "unrecognized ast type: %v", reflect.TypeOf(ex)) +func (e *typeErrors) unexpectedASTType(id int64, l common.Location, kind, typeName string) { + e.errs.ReportErrorAtID(id, l, "unexpected %s type: %v", kind, typeName) } diff --git a/vendor/github.com/google/cel-go/checker/printer.go b/vendor/github.com/google/cel-go/checker/printer.go index 15cba06ee9..7a3984f02c 100644 --- a/vendor/github.com/google/cel-go/checker/printer.go +++ b/vendor/github.com/google/cel-go/checker/printer.go @@ -17,40 +17,40 @@ package checker import ( "sort" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/debug" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) type semanticAdorner struct { - checks *exprpb.CheckedExpr + checked 
*ast.AST } var _ debug.Adorner = &semanticAdorner{} func (a *semanticAdorner) GetMetadata(elem any) string { result := "" - e, isExpr := elem.(*exprpb.Expr) + e, isExpr := elem.(ast.Expr) if !isExpr { return result } - t := a.checks.TypeMap[e.GetId()] + t := a.checked.TypeMap()[e.ID()] if t != nil { result += "~" - result += FormatCheckedType(t) + result += FormatCELType(t) } - switch e.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr, - *exprpb.Expr_CallExpr, - *exprpb.Expr_StructExpr, - *exprpb.Expr_SelectExpr: - if ref, found := a.checks.ReferenceMap[e.GetId()]; found { - if len(ref.GetOverloadId()) == 0 { + switch e.Kind() { + case ast.IdentKind, + ast.CallKind, + ast.ListKind, + ast.StructKind, + ast.SelectKind: + if ref, found := a.checked.ReferenceMap()[e.ID()]; found { + if len(ref.OverloadIDs) == 0 { result += "^" + ref.Name } else { - sort.Strings(ref.GetOverloadId()) - for i, overload := range ref.GetOverloadId() { + sort.Strings(ref.OverloadIDs) + for i, overload := range ref.OverloadIDs { if i == 0 { result += "^" } else { @@ -68,7 +68,7 @@ func (a *semanticAdorner) GetMetadata(elem any) string { // Print returns a string representation of the Expr message, // annotated with types from the CheckedExpr. The Expr must // be a sub-expression embedded in the CheckedExpr. -func Print(e *exprpb.Expr, checks *exprpb.CheckedExpr) string { - a := &semanticAdorner{checks: checks} +func Print(e ast.Expr, checked *ast.AST) string { + a := &semanticAdorner{checked: checked} return debug.ToAdornedDebugString(e, a) } diff --git a/vendor/github.com/google/cel-go/checker/standard.go b/vendor/github.com/google/cel-go/checker/standard.go deleted file mode 100644 index 11b35b80ee..0000000000 --- a/vendor/github.com/google/cel-go/checker/standard.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package checker - -import ( - "github.com/google/cel-go/common/stdlib" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// StandardFunctions returns the Decls for all functions in the evaluator. -// -// Deprecated: prefer stdlib.FunctionExprDecls() -func StandardFunctions() []*exprpb.Decl { - return stdlib.FunctionExprDecls() -} - -// StandardTypes returns the set of type identifiers for standard library types. 
-// -// Deprecated: prefer stdlib.TypeExprDecls() -func StandardTypes() []*exprpb.Decl { - return stdlib.TypeExprDecls() -} diff --git a/vendor/github.com/google/cel-go/checker/types.go b/vendor/github.com/google/cel-go/checker/types.go index e2373d1b7c..4c65b2737c 100644 --- a/vendor/github.com/google/cel-go/checker/types.go +++ b/vendor/github.com/google/cel-go/checker/types.go @@ -41,7 +41,7 @@ func isError(t *types.Type) bool { func isOptional(t *types.Type) bool { if t.Kind() == types.OpaqueKind { - return t.TypeName() == "optional" + return t.TypeName() == "optional_type" } return false } @@ -137,7 +137,11 @@ func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool { case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind, types.AnyKind, types.DurationKind, types.TimestampKind, types.StructKind: - return t1.IsAssignableType(t2) + // Test whether t2 is assignable from t1. The order of this check won't usually matter; + // however, there may be cases where type capabilities are expanded beyond what is supported + // in the current common/types package. For example, an interface designation for a group of + // Struct types. + return t2.IsAssignableType(t1) case types.TypeKind: return kind2 == types.TypeKind case types.OpaqueKind, types.ListKind, types.MapKind: @@ -256,7 +260,7 @@ func notReferencedIn(m *mapping, t, withinType *types.Type) bool { return true } return notReferencedIn(m, t, wtSub) - case types.OpaqueKind, types.ListKind, types.MapKind: + case types.OpaqueKind, types.ListKind, types.MapKind, types.TypeKind: for _, pt := range withinType.Parameters() { if !notReferencedIn(m, t, pt) { return false @@ -288,7 +292,8 @@ func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type { substitute(m, t.Parameters()[1], typeParamToDyn)) case types.TypeKind: if len(t.Parameters()) > 0 { - return types.NewTypeTypeWithParam(substitute(m, t.Parameters()[0], typeParamToDyn)) + tParam := t.Parameters()[0] + return types.NewTypeTypeWithParam(substitute(m, tParam, typeParamToDyn)) } return t default: diff --git a/vendor/github.com/google/cel-go/common/BUILD.bazel b/vendor/github.com/google/cel-go/common/BUILD.bazel index d6165b13af..eef7f281be 100644 --- a/vendor/github.com/google/cel-go/common/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/BUILD.bazel @@ -18,7 +18,6 @@ go_library( deps = [ "//common/runes:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", - "@org_golang_x_text//width:go_default_library", ], ) diff --git a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel index 7269cdff5f..5c40c37813 100644 --- a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel @@ -1,12 +1,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") package( - default_visibility = [ - "//cel:__subpackages__", - "//checker:__subpackages__", - "//common:__subpackages__", - "//interpreter:__subpackages__", - ], + default_visibility = ["//visibility:public"], licenses = ["notice"], # Apache 2.0 ) @@ -14,10 +9,14 @@ go_library( name = "go_default_library", srcs = [ "ast.go", + "conversion.go", "expr.go", + "factory.go", + "navigable.go", ], importpath = "github.com/google/cel-go/common/ast", deps = [ + "//common:go_default_library", "//common/types:go_default_library", "//common/types/ref:go_default_library", 
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", @@ -29,7 +28,9 @@ go_test( name = "go_default_test", srcs = [ "ast_test.go", + "conversion_test.go", "expr_test.go", + "navigable_test.go", ], embed = [ ":go_default_library", @@ -48,5 +49,6 @@ go_test( "//test/proto3pb:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//encoding/prototext:go_default_library", ], -) \ No newline at end of file +) diff --git a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go index b3c150793a..b807669d49 100644 --- a/vendor/github.com/google/cel-go/common/ast/ast.go +++ b/vendor/github.com/google/cel-go/common/ast/ast.go @@ -16,74 +16,362 @@ package ast import ( - "fmt" - + "github.com/google/cel-go/common" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" +) - structpb "google.golang.org/protobuf/types/known/structpb" +// AST contains a protobuf expression and source info along with CEL-native type and reference information. +type AST struct { + expr Expr + sourceInfo *SourceInfo + typeMap map[int64]*types.Type + refMap map[int64]*ReferenceInfo +} - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) +// Expr returns the root ast.Expr value in the AST. +func (a *AST) Expr() Expr { + if a == nil { + return nilExpr + } + return a.expr +} -// CheckedAST contains a protobuf expression and source info along with CEL-native type and reference information. -type CheckedAST struct { - Expr *exprpb.Expr - SourceInfo *exprpb.SourceInfo - TypeMap map[int64]*types.Type - ReferenceMap map[int64]*ReferenceInfo -} - -// CheckedASTToCheckedExpr converts a CheckedAST to a CheckedExpr protobouf. -func CheckedASTToCheckedExpr(ast *CheckedAST) (*exprpb.CheckedExpr, error) { - refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap)) - for id, ref := range ast.ReferenceMap { - r, err := ReferenceInfoToReferenceExpr(ref) - if err != nil { - return nil, err - } - refMap[id] = r +// SourceInfo returns the source metadata associated with the parse / type-check passes. +func (a *AST) SourceInfo() *SourceInfo { + if a == nil { + return nil + } + return a.sourceInfo +} + +// GetType returns the type for the expression at the given id, if one exists, else types.DynType. +func (a *AST) GetType(id int64) *types.Type { + if t, found := a.TypeMap()[id]; found { + return t + } + return types.DynType +} + +// SetType sets the type of the expression node at the given id. +func (a *AST) SetType(id int64, t *types.Type) { + if a == nil { + return + } + a.typeMap[id] = t +} + +// TypeMap returns the map of expression ids to type-checked types. +// +// If the AST is not type-checked, the map will be empty. +func (a *AST) TypeMap() map[int64]*types.Type { + if a == nil { + return map[int64]*types.Type{} + } + return a.typeMap +} + +// GetOverloadIDs returns the set of overload function names for a given expression id. +// +// If the expression id is not a function call, or the AST is not type-checked, the result will be empty. 
+func (a *AST) GetOverloadIDs(id int64) []string { + if ref, found := a.ReferenceMap()[id]; found { + return ref.OverloadIDs } - typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap)) - for id, typ := range ast.TypeMap { - t, err := types.TypeToExprType(typ) - if err != nil { - return nil, err + return []string{} +} + +// ReferenceMap returns the map of expression id to identifier, constant, and function references. +func (a *AST) ReferenceMap() map[int64]*ReferenceInfo { + if a == nil { + return map[int64]*ReferenceInfo{} + } + return a.refMap +} + +// SetReference adds a reference to the checked AST type map. +func (a *AST) SetReference(id int64, r *ReferenceInfo) { + if a == nil { + return + } + a.refMap[id] = r +} + +// IsChecked returns whether the AST is type-checked. +func (a *AST) IsChecked() bool { + return a != nil && len(a.TypeMap()) > 0 +} + +// NewAST creates a base AST instance with an ast.Expr and ast.SourceInfo value. +func NewAST(e Expr, sourceInfo *SourceInfo) *AST { + if e == nil { + e = nilExpr + } + return &AST{ + expr: e, + sourceInfo: sourceInfo, + typeMap: make(map[int64]*types.Type), + refMap: make(map[int64]*ReferenceInfo), + } +} + +// NewCheckedAST wraps an parsed AST and augments it with type and reference metadata. +func NewCheckedAST(parsed *AST, typeMap map[int64]*types.Type, refMap map[int64]*ReferenceInfo) *AST { + return &AST{ + expr: parsed.Expr(), + sourceInfo: parsed.SourceInfo(), + typeMap: typeMap, + refMap: refMap, + } +} + +// Copy creates a deep copy of the Expr and SourceInfo values in the input AST. +// +// Copies of the Expr value are generated using an internal default ExprFactory. +func Copy(a *AST) *AST { + if a == nil { + return nil + } + e := defaultFactory.CopyExpr(a.expr) + if !a.IsChecked() { + return NewAST(e, CopySourceInfo(a.SourceInfo())) + } + typesCopy := make(map[int64]*types.Type, len(a.typeMap)) + for id, t := range a.typeMap { + typesCopy[id] = t + } + refsCopy := make(map[int64]*ReferenceInfo, len(a.refMap)) + for id, r := range a.refMap { + refsCopy[id] = r + } + return NewCheckedAST(NewAST(e, CopySourceInfo(a.SourceInfo())), typesCopy, refsCopy) +} + +// MaxID returns the upper-bound, non-inclusive, of ids present within the AST's Expr value. +func MaxID(a *AST) int64 { + visitor := &maxIDVisitor{maxID: 1} + PostOrderVisit(a.Expr(), visitor) + for id, call := range a.SourceInfo().MacroCalls() { + PostOrderVisit(call, visitor) + if id > visitor.maxID { + visitor.maxID = id + 1 } - typeMap[id] = t - } - return &exprpb.CheckedExpr{ - Expr: ast.Expr, - SourceInfo: ast.SourceInfo, - ReferenceMap: refMap, - TypeMap: typeMap, - }, nil -} - -// CheckedExprToCheckedAST converts a CheckedExpr protobuf to a CheckedAST instance. -func CheckedExprToCheckedAST(checked *exprpb.CheckedExpr) (*CheckedAST, error) { - refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap())) - for id, ref := range checked.GetReferenceMap() { - r, err := ReferenceExprToReferenceInfo(ref) - if err != nil { - return nil, err + } + return visitor.maxID + 1 +} + +// NewSourceInfo creates a simple SourceInfo object from an input common.Source value. +func NewSourceInfo(src common.Source) *SourceInfo { + var lineOffsets []int32 + var desc string + baseLine := int32(0) + baseCol := int32(0) + if src != nil { + desc = src.Description() + lineOffsets = src.LineOffsets() + // Determine whether the source metadata should be computed relative + // to a base line and column value. 
This can be determined by requesting + // the location for offset 0 from the source object. + if loc, found := src.OffsetLocation(0); found { + baseLine = int32(loc.Line()) - 1 + baseCol = int32(loc.Column()) } - refMap[id] = r } - typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap())) - for id, typ := range checked.GetTypeMap() { - t, err := types.ExprTypeToType(typ) - if err != nil { - return nil, err + return &SourceInfo{ + desc: desc, + lines: lineOffsets, + baseLine: baseLine, + baseCol: baseCol, + offsetRanges: make(map[int64]OffsetRange), + macroCalls: make(map[int64]Expr), + } +} + +// CopySourceInfo creates a deep copy of the MacroCalls within the input SourceInfo. +// +// Copies of macro Expr values are generated using an internal default ExprFactory. +func CopySourceInfo(info *SourceInfo) *SourceInfo { + if info == nil { + return nil + } + rangesCopy := make(map[int64]OffsetRange, len(info.offsetRanges)) + for id, off := range info.offsetRanges { + rangesCopy[id] = off + } + callsCopy := make(map[int64]Expr, len(info.macroCalls)) + for id, call := range info.macroCalls { + callsCopy[id] = defaultFactory.CopyExpr(call) + } + return &SourceInfo{ + syntax: info.syntax, + desc: info.desc, + lines: info.lines, + baseLine: info.baseLine, + baseCol: info.baseCol, + offsetRanges: rangesCopy, + macroCalls: callsCopy, + } +} + +// SourceInfo records basic information about the expression as a textual input and +// as a parsed expression value. +type SourceInfo struct { + syntax string + desc string + lines []int32 + baseLine int32 + baseCol int32 + offsetRanges map[int64]OffsetRange + macroCalls map[int64]Expr +} + +// SyntaxVersion returns the syntax version associated with the text expression. +func (s *SourceInfo) SyntaxVersion() string { + if s == nil { + return "" + } + return s.syntax +} + +// Description provides information about where the expression came from. +func (s *SourceInfo) Description() string { + if s == nil { + return "" + } + return s.desc +} + +// LineOffsets returns a list of the 0-based character offsets in the input text where newlines appear. +func (s *SourceInfo) LineOffsets() []int32 { + if s == nil { + return []int32{} + } + return s.lines +} + +// MacroCalls returns a map of expression id to ast.Expr value where the id represents the expression +// node where the macro was inserted into the AST, and the ast.Expr value represents the original call +// signature which was replaced. +func (s *SourceInfo) MacroCalls() map[int64]Expr { + if s == nil { + return map[int64]Expr{} + } + return s.macroCalls +} + +// GetMacroCall returns the original ast.Expr value for the given expression if it was generated via +// a macro replacement. +// +// Note, parsing options must be enabled to track macro calls before this method will return a value. +func (s *SourceInfo) GetMacroCall(id int64) (Expr, bool) { + e, found := s.MacroCalls()[id] + return e, found +} + +// SetMacroCall records a macro call at a specific location. +func (s *SourceInfo) SetMacroCall(id int64, e Expr) { + if s != nil { + s.macroCalls[id] = e + } +} + +// ClearMacroCall removes the macro call at the given expression id. +func (s *SourceInfo) ClearMacroCall(id int64) { + if s != nil { + delete(s.macroCalls, id) + } +} + +// OffsetRanges returns a map of expression id to OffsetRange values where the range indicates either: +// the start and end position in the input stream where the expression occurs, or the start position +// only. 
If the range only captures start position, the stop position of the range will be equal to +// the start. +func (s *SourceInfo) OffsetRanges() map[int64]OffsetRange { + if s == nil { + return map[int64]OffsetRange{} + } + return s.offsetRanges +} + +// GetOffsetRange retrieves an OffsetRange for the given expression id if one exists. +func (s *SourceInfo) GetOffsetRange(id int64) (OffsetRange, bool) { + if s == nil { + return OffsetRange{}, false + } + o, found := s.offsetRanges[id] + return o, found +} + +// SetOffsetRange sets the OffsetRange for the given expression id. +func (s *SourceInfo) SetOffsetRange(id int64, o OffsetRange) { + if s == nil { + return + } + s.offsetRanges[id] = o +} + +// ClearOffsetRange removes the OffsetRange for the given expression id. +func (s *SourceInfo) ClearOffsetRange(id int64) { + if s != nil { + delete(s.offsetRanges, id) + } +} + +// GetStartLocation calculates the human-readable 1-based line and 0-based column of the first character +// of the expression node at the id. +func (s *SourceInfo) GetStartLocation(id int64) common.Location { + if o, found := s.GetOffsetRange(id); found { + return s.GetLocationByOffset(o.Start) + } + return common.NoLocation +} + +// GetStopLocation calculates the human-readable 1-based line and 0-based column of the last character for +// the expression node at the given id. +// +// If the SourceInfo was generated from a serialized protobuf representation, the stop location will +// be identical to the start location for the expression. +func (s *SourceInfo) GetStopLocation(id int64) common.Location { + if o, found := s.GetOffsetRange(id); found { + return s.GetLocationByOffset(o.Stop) + } + return common.NoLocation +} + +// GetLocationByOffset returns the line and column information for a given character offset. +func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location { + line := 1 + col := int(offset) + for _, lineOffset := range s.LineOffsets() { + if lineOffset > offset { + break } - typeMap[id] = t + line++ + col = int(offset - lineOffset) } - return &CheckedAST{ - Expr: checked.GetExpr(), - SourceInfo: checked.GetSourceInfo(), - ReferenceMap: refMap, - TypeMap: typeMap, - }, nil + return common.NewLocation(line, col) +} + +// ComputeOffset calculates the 0-based character offset from a 1-based line and 0-based column. +func (s *SourceInfo) ComputeOffset(line, col int32) int32 { + if s != nil { + line = s.baseLine + line + col = s.baseCol + col + } + if line == 1 { + return col + } + if line < 1 || line > int32(len(s.LineOffsets())) { + return -1 + } + offset := s.LineOffsets()[line-2] + return offset + col +} + +// OffsetRange captures the start and stop positions of a section of text in the input expression. +type OffsetRange struct { + Start int32 + Stop int32 } // ReferenceInfo contains a CEL native representation of an identifier reference which may refer to @@ -149,78 +437,21 @@ func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool { return true } -// ReferenceInfoToReferenceExpr converts a ReferenceInfo instance to a protobuf Reference suitable for serialization. -func ReferenceInfoToReferenceExpr(info *ReferenceInfo) (*exprpb.Reference, error) { - c, err := ValToConstant(info.Value) - if err != nil { - return nil, err - } - return &exprpb.Reference{ - Name: info.Name, - OverloadId: info.OverloadIDs, - Value: c, - }, nil +type maxIDVisitor struct { + maxID int64 + *baseVisitor } -// ReferenceExprToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance. 
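The location helpers above translate between absolute character offsets and 1-based line / 0-based column positions using the recorded line offsets. A small round-trip sketch, assuming common.NewTextSource (not shown in this hunk) builds a Source from a string; the input text, package, and function names are illustrative only:

package astexample

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/ast"
)

func locationRoundTrip() {
	// The first line is "a == 1" (6 characters) plus a newline, so the
	// recorded offset for the start of line 2 is 7.
	src := common.NewTextSource("a == 1\n&& b")
	info := ast.NewSourceInfo(src)

	// Offset -> location: offset 10 is the identifier 'b', i.e. line 2, column 3.
	loc := info.GetLocationByOffset(10)
	fmt.Println(loc.Line(), loc.Column())

	// Location -> offset: the inverse computation lands back on offset 10.
	fmt.Println(info.ComputeOffset(2, 3))
}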
-func ReferenceExprToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) { - v, err := ConstantToVal(ref.GetValue()) - if err != nil { - return nil, err +// VisitExpr updates the max identifier if the incoming expression id is greater than previously observed. +func (v *maxIDVisitor) VisitExpr(e Expr) { + if v.maxID < e.ID() { + v.maxID = e.ID() } - return &ReferenceInfo{ - Name: ref.GetName(), - OverloadIDs: ref.GetOverloadId(), - Value: v, - }, nil } -// ValToConstant converts a CEL-native ref.Val to a protobuf Constant. -// -// Only simple scalar types are supported by this method. -func ValToConstant(v ref.Val) (*exprpb.Constant, error) { - if v == nil { - return nil, nil - } - switch v.Type() { - case types.BoolType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil - case types.BytesType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil - case types.DoubleType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil - case types.IntType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil - case types.NullType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil - case types.StringType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil - case types.UintType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil - } - return nil, fmt.Errorf("unsupported constant kind: %v", v.Type()) -} - -// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val. -func ConstantToVal(c *exprpb.Constant) (ref.Val, error) { - if c == nil { - return nil, nil - } - switch c.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - return types.Bool(c.GetBoolValue()), nil - case *exprpb.Constant_BytesValue: - return types.Bytes(c.GetBytesValue()), nil - case *exprpb.Constant_DoubleValue: - return types.Double(c.GetDoubleValue()), nil - case *exprpb.Constant_Int64Value: - return types.Int(c.GetInt64Value()), nil - case *exprpb.Constant_NullValue: - return types.NullValue, nil - case *exprpb.Constant_StringValue: - return types.String(c.GetStringValue()), nil - case *exprpb.Constant_Uint64Value: - return types.Uint(c.GetUint64Value()), nil - } - return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind()) +// VisitEntryExpr updates the max identifier if the incoming entry id is greater than previously observed. +func (v *maxIDVisitor) VisitEntryExpr(e EntryExpr) { + if v.maxID < e.ID() { + v.maxID = e.ID() + } } diff --git a/vendor/github.com/google/cel-go/common/ast/conversion.go b/vendor/github.com/google/cel-go/common/ast/conversion.go new file mode 100644 index 0000000000..8f2c4bd1e6 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/conversion.go @@ -0,0 +1,632 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + structpb "google.golang.org/protobuf/types/known/structpb" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +// ToProto converts an AST to a CheckedExpr protobouf. +func ToProto(ast *AST) (*exprpb.CheckedExpr, error) { + refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap())) + for id, ref := range ast.ReferenceMap() { + r, err := ReferenceInfoToProto(ref) + if err != nil { + return nil, err + } + refMap[id] = r + } + typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap())) + for id, typ := range ast.TypeMap() { + t, err := types.TypeToExprType(typ) + if err != nil { + return nil, err + } + typeMap[id] = t + } + e, err := ExprToProto(ast.Expr()) + if err != nil { + return nil, err + } + info, err := SourceInfoToProto(ast.SourceInfo()) + if err != nil { + return nil, err + } + return &exprpb.CheckedExpr{ + Expr: e, + SourceInfo: info, + ReferenceMap: refMap, + TypeMap: typeMap, + }, nil +} + +// ToAST converts a CheckedExpr protobuf to an AST instance. +func ToAST(checked *exprpb.CheckedExpr) (*AST, error) { + refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap())) + for id, ref := range checked.GetReferenceMap() { + r, err := ProtoToReferenceInfo(ref) + if err != nil { + return nil, err + } + refMap[id] = r + } + typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap())) + for id, typ := range checked.GetTypeMap() { + t, err := types.ExprTypeToType(typ) + if err != nil { + return nil, err + } + typeMap[id] = t + } + info, err := ProtoToSourceInfo(checked.GetSourceInfo()) + if err != nil { + return nil, err + } + root, err := ProtoToExpr(checked.GetExpr()) + if err != nil { + return nil, err + } + ast := NewCheckedAST(NewAST(root, info), typeMap, refMap) + return ast, nil +} + +// ProtoToExpr converts a protobuf Expr value to an ast.Expr value. 
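ToProto and ToAST above are inverses with respect to the information they carry (expression, source positions, types, and references). A brief round-trip sketch; the package, function, and variable names are hypothetical, and the input *exprpb.CheckedExpr is assumed to come from elsewhere:

package astexample

import (
	"github.com/google/cel-go/common/ast"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// roundTrip converts a protobuf CheckedExpr into the native AST form and back.
// Proto SourceInfo only records start positions, so stop offsets in the native
// form equal the starts, as noted for GetStopLocation above.
func roundTrip(checkedPb *exprpb.CheckedExpr) (*exprpb.CheckedExpr, error) {
	native, err := ast.ToAST(checkedPb)
	if err != nil {
		return nil, err
	}
	// Native accessors are now available, e.g. the type of the root node.
	_ = native.GetType(native.Expr().ID())
	return ast.ToProto(native)
}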
+func ProtoToExpr(e *exprpb.Expr) (Expr, error) { + factory := NewExprFactory() + return exprInternal(factory, e) +} + +// ProtoToEntryExpr converts a protobuf struct/map entry to an ast.EntryExpr +func ProtoToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + factory := NewExprFactory() + switch e.GetKeyKind().(type) { + case *exprpb.Expr_CreateStruct_Entry_FieldKey: + return exprStructField(factory, e.GetId(), e) + case *exprpb.Expr_CreateStruct_Entry_MapKey: + return exprMapEntry(factory, e.GetId(), e) + } + return nil, fmt.Errorf("unsupported expr entry kind: %v", e) +} + +func exprInternal(factory ExprFactory, e *exprpb.Expr) (Expr, error) { + id := e.GetId() + switch e.GetExprKind().(type) { + case *exprpb.Expr_CallExpr: + return exprCall(factory, id, e.GetCallExpr()) + case *exprpb.Expr_ComprehensionExpr: + return exprComprehension(factory, id, e.GetComprehensionExpr()) + case *exprpb.Expr_ConstExpr: + return exprLiteral(factory, id, e.GetConstExpr()) + case *exprpb.Expr_IdentExpr: + return exprIdent(factory, id, e.GetIdentExpr()) + case *exprpb.Expr_ListExpr: + return exprList(factory, id, e.GetListExpr()) + case *exprpb.Expr_SelectExpr: + return exprSelect(factory, id, e.GetSelectExpr()) + case *exprpb.Expr_StructExpr: + s := e.GetStructExpr() + if s.GetMessageName() != "" { + return exprStruct(factory, id, s) + } + return exprMap(factory, id, s) + } + return factory.NewUnspecifiedExpr(id), nil +} + +func exprCall(factory ExprFactory, id int64, call *exprpb.Expr_Call) (Expr, error) { + var err error + args := make([]Expr, len(call.GetArgs())) + for i, a := range call.GetArgs() { + args[i], err = exprInternal(factory, a) + if err != nil { + return nil, err + } + } + if call.GetTarget() == nil { + return factory.NewCall(id, call.GetFunction(), args...), nil + } + + target, err := exprInternal(factory, call.GetTarget()) + if err != nil { + return nil, err + } + return factory.NewMemberCall(id, call.GetFunction(), target, args...), nil +} + +func exprComprehension(factory ExprFactory, id int64, comp *exprpb.Expr_Comprehension) (Expr, error) { + iterRange, err := exprInternal(factory, comp.GetIterRange()) + if err != nil { + return nil, err + } + accuInit, err := exprInternal(factory, comp.GetAccuInit()) + if err != nil { + return nil, err + } + loopCond, err := exprInternal(factory, comp.GetLoopCondition()) + if err != nil { + return nil, err + } + loopStep, err := exprInternal(factory, comp.GetLoopStep()) + if err != nil { + return nil, err + } + result, err := exprInternal(factory, comp.GetResult()) + if err != nil { + return nil, err + } + return factory.NewComprehension(id, + iterRange, + comp.GetIterVar(), + comp.GetAccuVar(), + accuInit, + loopCond, + loopStep, + result), nil +} + +func exprLiteral(factory ExprFactory, id int64, c *exprpb.Constant) (Expr, error) { + val, err := ConstantToVal(c) + if err != nil { + return nil, err + } + return factory.NewLiteral(id, val), nil +} + +func exprIdent(factory ExprFactory, id int64, i *exprpb.Expr_Ident) (Expr, error) { + return factory.NewIdent(id, i.GetName()), nil +} + +func exprList(factory ExprFactory, id int64, l *exprpb.Expr_CreateList) (Expr, error) { + elems := make([]Expr, len(l.GetElements())) + for i, e := range l.GetElements() { + elem, err := exprInternal(factory, e) + if err != nil { + return nil, err + } + elems[i] = elem + } + return factory.NewList(id, elems, l.GetOptionalIndices()), nil +} + +func exprMap(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) { + entries := 
make([]EntryExpr, len(s.GetEntries())) + var err error + for i, entry := range s.GetEntries() { + entries[i], err = exprMapEntry(factory, entry.GetId(), entry) + if err != nil { + return nil, err + } + } + return factory.NewMap(id, entries), nil +} + +func exprMapEntry(factory ExprFactory, id int64, e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + k, err := exprInternal(factory, e.GetMapKey()) + if err != nil { + return nil, err + } + v, err := exprInternal(factory, e.GetValue()) + if err != nil { + return nil, err + } + return factory.NewMapEntry(id, k, v, e.GetOptionalEntry()), nil +} + +func exprSelect(factory ExprFactory, id int64, s *exprpb.Expr_Select) (Expr, error) { + op, err := exprInternal(factory, s.GetOperand()) + if err != nil { + return nil, err + } + if s.GetTestOnly() { + return factory.NewPresenceTest(id, op, s.GetField()), nil + } + return factory.NewSelect(id, op, s.GetField()), nil +} + +func exprStruct(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) { + fields := make([]EntryExpr, len(s.GetEntries())) + var err error + for i, field := range s.GetEntries() { + fields[i], err = exprStructField(factory, field.GetId(), field) + if err != nil { + return nil, err + } + } + return factory.NewStruct(id, s.GetMessageName(), fields), nil +} + +func exprStructField(factory ExprFactory, id int64, f *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + v, err := exprInternal(factory, f.GetValue()) + if err != nil { + return nil, err + } + return factory.NewStructField(id, f.GetFieldKey(), v, f.GetOptionalEntry()), nil +} + +// ExprToProto serializes an ast.Expr value to a protobuf Expr representation. +func ExprToProto(e Expr) (*exprpb.Expr, error) { + if e == nil { + return &exprpb.Expr{}, nil + } + switch e.Kind() { + case CallKind: + return protoCall(e.ID(), e.AsCall()) + case ComprehensionKind: + return protoComprehension(e.ID(), e.AsComprehension()) + case IdentKind: + return protoIdent(e.ID(), e.AsIdent()) + case ListKind: + return protoList(e.ID(), e.AsList()) + case LiteralKind: + return protoLiteral(e.ID(), e.AsLiteral()) + case MapKind: + return protoMap(e.ID(), e.AsMap()) + case SelectKind: + return protoSelect(e.ID(), e.AsSelect()) + case StructKind: + return protoStruct(e.ID(), e.AsStruct()) + case UnspecifiedExprKind: + // Handle the case where a macro reference may be getting translated. + // A nested macro 'pointer' is a non-zero expression id with no kind set. 
+ if e.ID() != 0 { + return &exprpb.Expr{Id: e.ID()}, nil + } + return &exprpb.Expr{}, nil + } + return nil, fmt.Errorf("unsupported expr kind: %v", e) +} + +// EntryExprToProto converts an ast.EntryExpr to a protobuf CreateStruct entry +func EntryExprToProto(e EntryExpr) (*exprpb.Expr_CreateStruct_Entry, error) { + switch e.Kind() { + case MapEntryKind: + return protoMapEntry(e.ID(), e.AsMapEntry()) + case StructFieldKind: + return protoStructField(e.ID(), e.AsStructField()) + case UnspecifiedEntryExprKind: + return &exprpb.Expr_CreateStruct_Entry{}, nil + } + return nil, fmt.Errorf("unsupported expr entry kind: %v", e) +} + +func protoCall(id int64, call CallExpr) (*exprpb.Expr, error) { + var err error + var target *exprpb.Expr + if call.IsMemberFunction() { + target, err = ExprToProto(call.Target()) + if err != nil { + return nil, err + } + } + callArgs := call.Args() + args := make([]*exprpb.Expr, len(callArgs)) + for i, a := range callArgs { + args[i], err = ExprToProto(a) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_CallExpr{ + CallExpr: &exprpb.Expr_Call{ + Function: call.FunctionName(), + Target: target, + Args: args, + }, + }, + }, nil +} + +func protoComprehension(id int64, comp ComprehensionExpr) (*exprpb.Expr, error) { + iterRange, err := ExprToProto(comp.IterRange()) + if err != nil { + return nil, err + } + accuInit, err := ExprToProto(comp.AccuInit()) + if err != nil { + return nil, err + } + loopCond, err := ExprToProto(comp.LoopCondition()) + if err != nil { + return nil, err + } + loopStep, err := ExprToProto(comp.LoopStep()) + if err != nil { + return nil, err + } + result, err := ExprToProto(comp.Result()) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ComprehensionExpr{ + ComprehensionExpr: &exprpb.Expr_Comprehension{ + IterVar: comp.IterVar(), + IterRange: iterRange, + AccuVar: comp.AccuVar(), + AccuInit: accuInit, + LoopCondition: loopCond, + LoopStep: loopStep, + Result: result, + }, + }, + }, nil +} + +func protoIdent(id int64, name string) (*exprpb.Expr, error) { + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_IdentExpr{ + IdentExpr: &exprpb.Expr_Ident{ + Name: name, + }, + }, + }, nil +} + +func protoList(id int64, list ListExpr) (*exprpb.Expr, error) { + var err error + elems := make([]*exprpb.Expr, list.Size()) + for i, e := range list.Elements() { + elems[i], err = ExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ListExpr{ + ListExpr: &exprpb.Expr_CreateList{ + Elements: elems, + OptionalIndices: list.OptionalIndices(), + }, + }, + }, nil +} + +func protoLiteral(id int64, val ref.Val) (*exprpb.Expr, error) { + c, err := ValToConstant(val) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ConstExpr{ + ConstExpr: c, + }, + }, nil +} + +func protoMap(id int64, m MapExpr) (*exprpb.Expr, error) { + entries := make([]*exprpb.Expr_CreateStruct_Entry, len(m.Entries())) + var err error + for i, e := range m.Entries() { + entries[i], err = EntryExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_StructExpr{ + StructExpr: &exprpb.Expr_CreateStruct{ + Entries: entries, + }, + }, + }, nil +} + +func protoMapEntry(id int64, e MapEntry) (*exprpb.Expr_CreateStruct_Entry, error) { + k, err := ExprToProto(e.Key()) + if err != nil { + return nil, err + } + v, err := 
ExprToProto(e.Value()) + if err != nil { + return nil, err + } + return &exprpb.Expr_CreateStruct_Entry{ + Id: id, + KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{ + MapKey: k, + }, + Value: v, + OptionalEntry: e.IsOptional(), + }, nil +} + +func protoSelect(id int64, s SelectExpr) (*exprpb.Expr, error) { + op, err := ExprToProto(s.Operand()) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_SelectExpr{ + SelectExpr: &exprpb.Expr_Select{ + Operand: op, + Field: s.FieldName(), + TestOnly: s.IsTestOnly(), + }, + }, + }, nil +} + +func protoStruct(id int64, s StructExpr) (*exprpb.Expr, error) { + entries := make([]*exprpb.Expr_CreateStruct_Entry, len(s.Fields())) + var err error + for i, e := range s.Fields() { + entries[i], err = EntryExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_StructExpr{ + StructExpr: &exprpb.Expr_CreateStruct{ + MessageName: s.TypeName(), + Entries: entries, + }, + }, + }, nil +} + +func protoStructField(id int64, f StructField) (*exprpb.Expr_CreateStruct_Entry, error) { + v, err := ExprToProto(f.Value()) + if err != nil { + return nil, err + } + return &exprpb.Expr_CreateStruct_Entry{ + Id: id, + KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{ + FieldKey: f.Name(), + }, + Value: v, + OptionalEntry: f.IsOptional(), + }, nil +} + +// SourceInfoToProto serializes an ast.SourceInfo value to a protobuf SourceInfo object. +func SourceInfoToProto(info *SourceInfo) (*exprpb.SourceInfo, error) { + if info == nil { + return &exprpb.SourceInfo{}, nil + } + sourceInfo := &exprpb.SourceInfo{ + SyntaxVersion: info.SyntaxVersion(), + Location: info.Description(), + LineOffsets: info.LineOffsets(), + Positions: make(map[int64]int32, len(info.OffsetRanges())), + MacroCalls: make(map[int64]*exprpb.Expr, len(info.MacroCalls())), + } + for id, offset := range info.OffsetRanges() { + sourceInfo.Positions[id] = offset.Start + } + for id, e := range info.MacroCalls() { + call, err := ExprToProto(e) + if err != nil { + return nil, err + } + sourceInfo.MacroCalls[id] = call + } + return sourceInfo, nil +} + +// ProtoToSourceInfo deserializes the protobuf into a native SourceInfo value. +func ProtoToSourceInfo(info *exprpb.SourceInfo) (*SourceInfo, error) { + sourceInfo := &SourceInfo{ + syntax: info.GetSyntaxVersion(), + desc: info.GetLocation(), + lines: info.GetLineOffsets(), + offsetRanges: make(map[int64]OffsetRange, len(info.GetPositions())), + macroCalls: make(map[int64]Expr, len(info.GetMacroCalls())), + } + for id, offset := range info.GetPositions() { + sourceInfo.SetOffsetRange(id, OffsetRange{Start: offset, Stop: offset}) + } + for id, e := range info.GetMacroCalls() { + call, err := ProtoToExpr(e) + if err != nil { + return nil, err + } + sourceInfo.SetMacroCall(id, call) + } + return sourceInfo, nil +} + +// ReferenceInfoToProto converts a ReferenceInfo instance to a protobuf Reference suitable for serialization. +func ReferenceInfoToProto(info *ReferenceInfo) (*exprpb.Reference, error) { + c, err := ValToConstant(info.Value) + if err != nil { + return nil, err + } + return &exprpb.Reference{ + Name: info.Name, + OverloadId: info.OverloadIDs, + Value: c, + }, nil +} + +// ProtoToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance. 
+func ProtoToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) { + v, err := ConstantToVal(ref.GetValue()) + if err != nil { + return nil, err + } + return &ReferenceInfo{ + Name: ref.GetName(), + OverloadIDs: ref.GetOverloadId(), + Value: v, + }, nil +} + +// ValToConstant converts a CEL-native ref.Val to a protobuf Constant. +// +// Only simple scalar types are supported by this method. +func ValToConstant(v ref.Val) (*exprpb.Constant, error) { + if v == nil { + return nil, nil + } + switch v.Type() { + case types.BoolType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil + case types.BytesType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil + case types.DoubleType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil + case types.IntType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil + case types.NullType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil + case types.StringType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil + case types.UintType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil + } + return nil, fmt.Errorf("unsupported constant kind: %v", v.Type()) +} + +// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val. +func ConstantToVal(c *exprpb.Constant) (ref.Val, error) { + if c == nil { + return nil, nil + } + switch c.GetConstantKind().(type) { + case *exprpb.Constant_BoolValue: + return types.Bool(c.GetBoolValue()), nil + case *exprpb.Constant_BytesValue: + return types.Bytes(c.GetBytesValue()), nil + case *exprpb.Constant_DoubleValue: + return types.Double(c.GetDoubleValue()), nil + case *exprpb.Constant_Int64Value: + return types.Int(c.GetInt64Value()), nil + case *exprpb.Constant_NullValue: + return types.NullValue, nil + case *exprpb.Constant_StringValue: + return types.String(c.GetStringValue()), nil + case *exprpb.Constant_Uint64Value: + return types.Uint(c.GetUint64Value()), nil + } + return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind()) +} diff --git a/vendor/github.com/google/cel-go/common/ast/expr.go b/vendor/github.com/google/cel-go/common/ast/expr.go index b63884a602..7b043a15bb 100644 --- a/vendor/github.com/google/cel-go/common/ast/expr.go +++ b/vendor/github.com/google/cel-go/common/ast/expr.go @@ -15,168 +15,61 @@ package ast import ( - "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // ExprKind represents the expression node kind. type ExprKind int const ( - // UnspecifiedKind represents an unset expression with no specified properties. - UnspecifiedKind ExprKind = iota + // UnspecifiedExprKind represents an unset expression with no specified properties. + UnspecifiedExprKind ExprKind = iota - // LiteralKind represents a primitive scalar literal. - LiteralKind + // CallKind represents a function call. + CallKind + + // ComprehensionKind represents a comprehension expression generated by a macro. + ComprehensionKind // IdentKind represents a simple variable, constant, or type identifier. IdentKind - // SelectKind represents a field selection expression. 
- SelectKind - - // CallKind represents a function call. - CallKind - // ListKind represents a list literal expression. ListKind + // LiteralKind represents a primitive scalar literal. + LiteralKind + // MapKind represents a map literal expression. MapKind + // SelectKind represents a field selection expression. + SelectKind + // StructKind represents a struct literal expression. StructKind - - // ComprehensionKind represents a comprehension expression generated by a macro. - ComprehensionKind ) -// NavigateCheckedAST converts a CheckedAST to a NavigableExpr -func NavigateCheckedAST(ast *CheckedAST) NavigableExpr { - return newNavigableExpr(nil, ast.Expr, ast.TypeMap) -} - -// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match. +// Expr represents the base expression node in a CEL abstract syntax tree. // -// This function type should be use with the `Match` and `MatchList` calls. -type ExprMatcher func(NavigableExpr) bool - -// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr -// is comprised of all constant values, such as a simple literal or even list and map literal. -func ConstantValueMatcher() ExprMatcher { - return matchIsConstantValue -} - -// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches -// the specified `kind`. -func KindMatcher(kind ExprKind) ExprMatcher { - return func(e NavigableExpr) bool { - return e.Kind() == kind - } -} - -// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose -// function name is equal to `funcName`. -func FunctionMatcher(funcName string) ExprMatcher { - return func(e NavigableExpr) bool { - if e.Kind() != CallKind { - return false - } - return e.AsCall().FunctionName() == funcName - } -} - -// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list. -// -// Such a result would work well with subsequent MatchList calls. -func AllMatcher() ExprMatcher { - return func(NavigableExpr) bool { - return true - } -} - -// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values of the -// descendants which match. -func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr { - return matchListInternal([]NavigableExpr{expr}, matcher, true) -} - -// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a -// subset of NavigableExpr values which match. -func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr { - visit := make([]NavigableExpr, len(exprs)) - copy(visit, exprs) - return matchListInternal(visit, matcher, false) -} - -func matchListInternal(visit []NavigableExpr, matcher ExprMatcher, visitDescendants bool) []NavigableExpr { - var matched []NavigableExpr - for len(visit) != 0 { - e := visit[0] - if matcher(e) { - matched = append(matched, e) - } - if visitDescendants { - visit = append(visit[1:], e.Children()...) - } else { - visit = visit[1:] - } - } - return matched -} - -func matchIsConstantValue(e NavigableExpr) bool { - if e.Kind() == LiteralKind { - return true - } - if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind { - for _, child := range e.Children() { - if !matchIsConstantValue(child) { - return false - } - } - return true - } - return false -} - -// NavigableExpr represents the base navigable expression value. 
-// -// Depending on the `Kind()` value, the NavigableExpr may be converted to a concrete expression types +// Depending on the `Kind()` value, the Expr may be converted to a concrete expression types // as indicated by the `As` methods. -// -// NavigableExpr values and their concrete expression types should be nil-safe. Conversion of an expr -// to the wrong kind should produce a nil value. -type NavigableExpr interface { +type Expr interface { // ID of the expression as it appears in the AST ID() int64 // Kind of the expression node. See ExprKind for the valid enum values. Kind() ExprKind - // Type of the expression node. - Type() *types.Type - - // Parent returns the parent expression node, if one exists. - Parent() (NavigableExpr, bool) - - // Children returns a list of child expression nodes. - Children() []NavigableExpr - - // ToExpr adapts this NavigableExpr to a protobuf representation. - ToExpr() *exprpb.Expr - - // AsCall adapts the expr into a NavigableCallExpr + // AsCall adapts the expr into a CallExpr // // The Kind() must be equal to a CallKind for the conversion to be well-defined. - AsCall() NavigableCallExpr + AsCall() CallExpr - // AsComprehension adapts the expr into a NavigableComprehensionExpr. + // AsComprehension adapts the expr into a ComprehensionExpr. // // The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined. - AsComprehension() NavigableComprehensionExpr + AsComprehension() ComprehensionExpr // AsIdent adapts the expr into an identifier string. // @@ -188,67 +81,123 @@ type NavigableExpr interface { // The Kind() must be equal to a LiteralKind for the conversion to be well-defined. AsLiteral() ref.Val - // AsList adapts the expr into a NavigableListExpr. + // AsList adapts the expr into a ListExpr. // // The Kind() must be equal to a ListKind for the conversion to be well-defined. - AsList() NavigableListExpr + AsList() ListExpr - // AsMap adapts the expr into a NavigableMapExpr. + // AsMap adapts the expr into a MapExpr. // // The Kind() must be equal to a MapKind for the conversion to be well-defined. - AsMap() NavigableMapExpr + AsMap() MapExpr - // AsSelect adapts the expr into a NavigableSelectExpr. + // AsSelect adapts the expr into a SelectExpr. // // The Kind() must be equal to a SelectKind for the conversion to be well-defined. - AsSelect() NavigableSelectExpr + AsSelect() SelectExpr - // AsStruct adapts the expr into a NavigableStructExpr. + // AsStruct adapts the expr into a StructExpr. // // The Kind() must be equal to a StructKind for the conversion to be well-defined. - AsStruct() NavigableStructExpr + AsStruct() StructExpr - // marker interface method - isNavigable() + // RenumberIDs performs an in-place update of the expression and all of its descendents numeric ids. + RenumberIDs(IDGenerator) + + // SetKindCase replaces the contents of the current expression with the contents of the other. + // + // The SetKindCase takes ownership of any expression instances references within the input Expr. + // A shallow copy is made of the Expr value itself, but not a deep one. + // + // This method should only be used during AST rewrites using temporary Expr values. + SetKindCase(Expr) + + // isExpr is a marker interface. + isExpr() } -// NavigableCallExpr defines an interface for inspecting a function call and its arugments. -type NavigableCallExpr interface { +// EntryExprKind represents the possible EntryExpr kinds. +type EntryExprKind int + +const ( + // UnspecifiedEntryExprKind indicates that the entry expr is not set. 
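Concrete Expr values are built through an ExprFactory (factory.go, added in this change and used by the conversion code above). A small sketch that hand-assembles the expression x + 1 and inspects it through the interface methods; the factory method signatures are assumed to match their use in conversion.go, and the ids are assigned manually for illustration:

package astexample

import (
	"fmt"

	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/types"
)

func buildAndInspect() {
	fac := ast.NewExprFactory()
	// "_+_" is the CEL operator name for addition.
	add := fac.NewCall(3, "_+_",
		fac.NewIdent(1, "x"),
		fac.NewLiteral(2, types.Int(1)))

	if add.Kind() == ast.CallKind {
		call := add.AsCall()
		fmt.Println(call.FunctionName(), len(call.Args()), call.IsMemberFunction())
	}
}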
+ UnspecifiedEntryExprKind EntryExprKind = iota + + // MapEntryKind indicates that the entry is a MapEntry type with key and value expressions. + MapEntryKind + + // StructFieldKind indicates that the entry is a StructField with a field name and initializer + // expression. + StructFieldKind +) + +// EntryExpr represents the base entry expression in a CEL map or struct literal. +type EntryExpr interface { + // ID of the entry as it appears in the AST. + ID() int64 + + // Kind of the entry expression node. See EntryExprKind for valid enum values. + Kind() EntryExprKind + + // AsMapEntry casts the EntryExpr to a MapEntry. + // + // The Kind() must be equal to MapEntryKind for the conversion to be well-defined. + AsMapEntry() MapEntry + + // AsStructField casts the EntryExpr to a StructField + // + // The Kind() must be equal to StructFieldKind for the conversion to be well-defined. + AsStructField() StructField + + // RenumberIDs performs an in-place update of the expression and all of its descendents numeric ids. + RenumberIDs(IDGenerator) + + isEntryExpr() +} + +// IDGenerator produces unique ids suitable for tagging expression nodes +type IDGenerator func(originalID int64) int64 + +// CallExpr defines an interface for inspecting a function call and its arguments. +type CallExpr interface { // FunctionName returns the name of the function. FunctionName() string + // IsMemberFunction returns whether the call has a non-nil target indicating it is a member function + IsMemberFunction() bool + // Target returns the target of the expression if one is present. - Target() NavigableExpr + Target() Expr // Args returns the list of call arguments, excluding the target. - Args() []NavigableExpr - - // ReturnType returns the result type of the call. - ReturnType() *types.Type + Args() []Expr // marker interface method - isNavigable() + isExpr() } -// NavigableListExpr defines an interface for inspecting a list literal expression. -type NavigableListExpr interface { +// ListExpr defines an interface for inspecting a list literal expression. +type ListExpr interface { // Elements returns the list elements as navigable expressions. - Elements() []NavigableExpr + Elements() []Expr // OptionalIndicies returns the list of optional indices in the list literal. OptionalIndices() []int32 + // IsOptional indicates whether the given element index is optional. + IsOptional(int32) bool + // Size returns the number of elements in the list. Size() int // marker interface method - isNavigable() + isExpr() } -// NavigableSelectExpr defines an interface for inspecting a select expression. -type NavigableSelectExpr interface { +// SelectExpr defines an interface for inspecting a select expression. +type SelectExpr interface { // Operand returns the selection operand expression. - Operand() NavigableExpr + Operand() Expr // FieldName returns the field name being selected from the operand. FieldName() string @@ -257,67 +206,67 @@ type NavigableSelectExpr interface { IsTestOnly() bool // marker interface method - isNavigable() + isExpr() } -// NavigableMapExpr defines an interface for inspecting a map expression. -type NavigableMapExpr interface { - // Entries returns the map key value pairs as NavigableEntry values. - Entries() []NavigableEntry +// MapExpr defines an interface for inspecting a map expression. +type MapExpr interface { + // Entries returns the map key value pairs as EntryExpr values. + Entries() []EntryExpr // Size returns the number of entries in the map. 
Size() int // marker interface method - isNavigable() + isExpr() } -// NavigableEntry defines an interface for inspecting a map entry. -type NavigableEntry interface { +// MapEntry defines an interface for inspecting a map entry. +type MapEntry interface { // Key returns the map entry key expression. - Key() NavigableExpr + Key() Expr // Value returns the map entry value expression. - Value() NavigableExpr + Value() Expr // IsOptional returns whether the entry is optional. IsOptional() bool // marker interface method - isNavigable() + isEntryExpr() } -// NavigableStructExpr defines an interfaces for inspecting a struct and its field initializers. -type NavigableStructExpr interface { +// StructExpr defines an interfaces for inspecting a struct and its field initializers. +type StructExpr interface { // TypeName returns the struct type name. TypeName() string - // Fields returns the set of field initializers in the struct expression as NavigableField values. - Fields() []NavigableField + // Fields returns the set of field initializers in the struct expression as EntryExpr values. + Fields() []EntryExpr // marker interface method - isNavigable() + isExpr() } -// NavigableField defines an interface for inspecting a struct field initialization. -type NavigableField interface { - // FieldName returns the name of the field. - FieldName() string +// StructField defines an interface for inspecting a struct field initialization. +type StructField interface { + // Name returns the name of the field. + Name() string // Value returns the field initialization expression. - Value() NavigableExpr + Value() Expr // IsOptional returns whether the field is optional. IsOptional() bool // marker interface method - isNavigable() + isEntryExpr() } -// NavigableComprehensionExpr defines an interface for inspecting a comprehension expression. -type NavigableComprehensionExpr interface { +// ComprehensionExpr defines an interface for inspecting a comprehension expression. +type ComprehensionExpr interface { // IterRange returns the iteration range expression. - IterRange() NavigableExpr + IterRange() Expr // IterVar returns the iteration variable name. IterVar() string @@ -326,384 +275,586 @@ type NavigableComprehensionExpr interface { AccuVar() string // AccuInit returns the accumulation variable initialization expression. - AccuInit() NavigableExpr + AccuInit() Expr // LoopCondition returns the loop condition expression. - LoopCondition() NavigableExpr + LoopCondition() Expr // LoopStep returns the loop step expression. - LoopStep() NavigableExpr + LoopStep() Expr // Result returns the comprehension result expression. 
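RenumberIDs rewrites every node id in place through the supplied IDGenerator; the concrete renumberIDs implementations that follow simply recurse into child expressions. A sketch of a stable renumbering that shifts all non-zero ids by a fixed offset (shiftIDs and the zero-id convention are choices of this sketch, not part of the library):

package astexample

import "github.com/google/cel-go/common/ast"

// shiftIDs offsets every non-zero expression id and leaves id 0 untouched,
// treating 0 here as an unset placeholder id.
func shiftIDs(e ast.Expr, offset int64) {
	e.RenumberIDs(func(originalID int64) int64 {
		if originalID == 0 {
			return 0
		}
		return originalID + offset
	})
}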
- Result() NavigableExpr + Result() Expr // marker interface method - isNavigable() + isExpr() } -func newNavigableExpr(parent NavigableExpr, expr *exprpb.Expr, typeMap map[int64]*types.Type) NavigableExpr { - kind, factory := kindOf(expr) - nav := &navigableExprImpl{ - parent: parent, - kind: kind, - expr: expr, - typeMap: typeMap, - createChildren: factory, +var _ Expr = &expr{} + +type expr struct { + id int64 + exprKindCase +} + +type exprKindCase interface { + Kind() ExprKind + + renumberIDs(IDGenerator) + + isExpr() +} + +func (e *expr) ID() int64 { + if e == nil { + return 0 } - return nav + return e.id } -type navigableExprImpl struct { - parent NavigableExpr - kind ExprKind - expr *exprpb.Expr - typeMap map[int64]*types.Type - createChildren childFactory +func (e *expr) Kind() ExprKind { + if e == nil || e.exprKindCase == nil { + return UnspecifiedExprKind + } + return e.exprKindCase.Kind() } -func (nav *navigableExprImpl) ID() int64 { - return nav.ToExpr().GetId() +func (e *expr) AsCall() CallExpr { + if e.Kind() != CallKind { + return nilCall + } + return e.exprKindCase.(CallExpr) } -func (nav *navigableExprImpl) Kind() ExprKind { - return nav.kind +func (e *expr) AsComprehension() ComprehensionExpr { + if e.Kind() != ComprehensionKind { + return nilCompre + } + return e.exprKindCase.(ComprehensionExpr) } -func (nav *navigableExprImpl) Type() *types.Type { - if t, found := nav.typeMap[nav.ID()]; found { - return t +func (e *expr) AsIdent() string { + if e.Kind() != IdentKind { + return "" } - return types.DynType + return string(e.exprKindCase.(baseIdentExpr)) } -func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) { - if nav.parent != nil { - return nav.parent, true +func (e *expr) AsLiteral() ref.Val { + if e.Kind() != LiteralKind { + return nil } - return nil, false + return e.exprKindCase.(*baseLiteral).Val } -func (nav *navigableExprImpl) Children() []NavigableExpr { - return nav.createChildren(nav) +func (e *expr) AsList() ListExpr { + if e.Kind() != ListKind { + return nilList + } + return e.exprKindCase.(ListExpr) } -func (nav *navigableExprImpl) ToExpr() *exprpb.Expr { - return nav.expr +func (e *expr) AsMap() MapExpr { + if e.Kind() != MapKind { + return nilMap + } + return e.exprKindCase.(MapExpr) } -func (nav *navigableExprImpl) AsCall() NavigableCallExpr { - return navigableCallImpl{navigableExprImpl: nav} +func (e *expr) AsSelect() SelectExpr { + if e.Kind() != SelectKind { + return nilSel + } + return e.exprKindCase.(SelectExpr) } -func (nav *navigableExprImpl) AsComprehension() NavigableComprehensionExpr { - return navigableComprehensionImpl{navigableExprImpl: nav} +func (e *expr) AsStruct() StructExpr { + if e.Kind() != StructKind { + return nilStruct + } + return e.exprKindCase.(StructExpr) } -func (nav *navigableExprImpl) AsIdent() string { - return nav.ToExpr().GetIdentExpr().GetName() +func (e *expr) SetKindCase(other Expr) { + if e == nil { + return + } + if other == nil { + e.exprKindCase = nil + return + } + switch other.Kind() { + case CallKind: + c := other.AsCall() + e.exprKindCase = &baseCallExpr{ + function: c.FunctionName(), + target: c.Target(), + args: c.Args(), + isMember: c.IsMemberFunction(), + } + case ComprehensionKind: + c := other.AsComprehension() + e.exprKindCase = &baseComprehensionExpr{ + iterRange: c.IterRange(), + iterVar: c.IterVar(), + accuVar: c.AccuVar(), + accuInit: c.AccuInit(), + loopCond: c.LoopCondition(), + loopStep: c.LoopStep(), + result: c.Result(), + } + case IdentKind: + e.exprKindCase = 
baseIdentExpr(other.AsIdent()) + case ListKind: + l := other.AsList() + optIndexMap := make(map[int32]struct{}, len(l.OptionalIndices())) + for _, idx := range l.OptionalIndices() { + optIndexMap[idx] = struct{}{} + } + e.exprKindCase = &baseListExpr{ + elements: l.Elements(), + optIndices: l.OptionalIndices(), + optIndexMap: optIndexMap, + } + case LiteralKind: + e.exprKindCase = &baseLiteral{Val: other.AsLiteral()} + case MapKind: + e.exprKindCase = &baseMapExpr{ + entries: other.AsMap().Entries(), + } + case SelectKind: + s := other.AsSelect() + e.exprKindCase = &baseSelectExpr{ + operand: s.Operand(), + field: s.FieldName(), + testOnly: s.IsTestOnly(), + } + case StructKind: + s := other.AsStruct() + e.exprKindCase = &baseStructExpr{ + typeName: s.TypeName(), + fields: s.Fields(), + } + case UnspecifiedExprKind: + e.exprKindCase = nil + } } -func (nav *navigableExprImpl) AsLiteral() ref.Val { - if nav.Kind() != LiteralKind { - return nil +func (e *expr) RenumberIDs(idGen IDGenerator) { + if e == nil { + return } - val, err := ConstantToVal(nav.ToExpr().GetConstExpr()) - if err != nil { - panic(err) + e.id = idGen(e.id) + if e.exprKindCase != nil { + e.exprKindCase.renumberIDs(idGen) } - return val } -func (nav *navigableExprImpl) AsList() NavigableListExpr { - return navigableListImpl{navigableExprImpl: nav} +type baseCallExpr struct { + function string + target Expr + args []Expr + isMember bool } -func (nav *navigableExprImpl) AsMap() NavigableMapExpr { - return navigableMapImpl{navigableExprImpl: nav} +func (*baseCallExpr) Kind() ExprKind { + return CallKind } -func (nav *navigableExprImpl) AsSelect() NavigableSelectExpr { - return navigableSelectImpl{navigableExprImpl: nav} +func (e *baseCallExpr) FunctionName() string { + if e == nil { + return "" + } + return e.function } -func (nav *navigableExprImpl) AsStruct() NavigableStructExpr { - return navigableStructImpl{navigableExprImpl: nav} +func (e *baseCallExpr) IsMemberFunction() bool { + if e == nil { + return false + } + return e.isMember } -func (nav *navigableExprImpl) createChild(e *exprpb.Expr) NavigableExpr { - return newNavigableExpr(nav, e, nav.typeMap) +func (e *baseCallExpr) Target() Expr { + if e == nil || !e.IsMemberFunction() { + return nilExpr + } + return e.target } -func (nav *navigableExprImpl) isNavigable() {} +func (e *baseCallExpr) Args() []Expr { + if e == nil { + return []Expr{} + } + return e.args +} -type navigableCallImpl struct { - *navigableExprImpl +func (e *baseCallExpr) renumberIDs(idGen IDGenerator) { + if e.IsMemberFunction() { + e.Target().RenumberIDs(idGen) + } + for _, arg := range e.Args() { + arg.RenumberIDs(idGen) + } } -func (call navigableCallImpl) FunctionName() string { - return call.ToExpr().GetCallExpr().GetFunction() +func (*baseCallExpr) isExpr() {} + +var _ ComprehensionExpr = &baseComprehensionExpr{} + +type baseComprehensionExpr struct { + iterRange Expr + iterVar string + accuVar string + accuInit Expr + loopCond Expr + loopStep Expr + result Expr } -func (call navigableCallImpl) Target() NavigableExpr { - t := call.ToExpr().GetCallExpr().GetTarget() - if t != nil { - return call.createChild(t) - } - return nil +func (*baseComprehensionExpr) Kind() ExprKind { + return ComprehensionKind } -func (call navigableCallImpl) Args() []NavigableExpr { - args := call.ToExpr().GetCallExpr().GetArgs() - navArgs := make([]NavigableExpr, len(args)) - for i, a := range args { - navArgs[i] = call.createChild(a) +func (e *baseComprehensionExpr) IterRange() Expr { + if e == nil { + return nilExpr } 
- return navArgs + return e.iterRange } -func (call navigableCallImpl) ReturnType() *types.Type { - return call.Type() +func (e *baseComprehensionExpr) IterVar() string { + return e.iterVar } -type navigableComprehensionImpl struct { - *navigableExprImpl +func (e *baseComprehensionExpr) AccuVar() string { + return e.accuVar } -func (comp navigableComprehensionImpl) IterRange() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetIterRange()) +func (e *baseComprehensionExpr) AccuInit() Expr { + if e == nil { + return nilExpr + } + return e.accuInit } -func (comp navigableComprehensionImpl) IterVar() string { - return comp.ToExpr().GetComprehensionExpr().GetIterVar() +func (e *baseComprehensionExpr) LoopCondition() Expr { + if e == nil { + return nilExpr + } + return e.loopCond } -func (comp navigableComprehensionImpl) AccuVar() string { - return comp.ToExpr().GetComprehensionExpr().GetAccuVar() +func (e *baseComprehensionExpr) LoopStep() Expr { + if e == nil { + return nilExpr + } + return e.loopStep } -func (comp navigableComprehensionImpl) AccuInit() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetAccuInit()) +func (e *baseComprehensionExpr) Result() Expr { + if e == nil { + return nilExpr + } + return e.result } -func (comp navigableComprehensionImpl) LoopCondition() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopCondition()) +func (e *baseComprehensionExpr) renumberIDs(idGen IDGenerator) { + e.IterRange().RenumberIDs(idGen) + e.AccuInit().RenumberIDs(idGen) + e.LoopCondition().RenumberIDs(idGen) + e.LoopStep().RenumberIDs(idGen) + e.Result().RenumberIDs(idGen) } -func (comp navigableComprehensionImpl) LoopStep() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopStep()) +func (*baseComprehensionExpr) isExpr() {} + +var _ exprKindCase = baseIdentExpr("") + +type baseIdentExpr string + +func (baseIdentExpr) Kind() ExprKind { + return IdentKind } -func (comp navigableComprehensionImpl) Result() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetResult()) +func (e baseIdentExpr) renumberIDs(IDGenerator) {} + +func (baseIdentExpr) isExpr() {} + +var _ exprKindCase = &baseLiteral{} +var _ ref.Val = &baseLiteral{} + +type baseLiteral struct { + ref.Val } -type navigableListImpl struct { - *navigableExprImpl +func (*baseLiteral) Kind() ExprKind { + return LiteralKind } -func (l navigableListImpl) Elements() []NavigableExpr { - return l.Children() +func (l *baseLiteral) renumberIDs(IDGenerator) {} + +func (*baseLiteral) isExpr() {} + +var _ ListExpr = &baseListExpr{} + +type baseListExpr struct { + elements []Expr + optIndices []int32 + optIndexMap map[int32]struct{} } -func (l navigableListImpl) OptionalIndices() []int32 { - return l.ToExpr().GetListExpr().GetOptionalIndices() +func (*baseListExpr) Kind() ExprKind { + return ListKind } -func (l navigableListImpl) Size() int { - return len(l.ToExpr().GetListExpr().GetElements()) +func (e *baseListExpr) Elements() []Expr { + if e == nil { + return []Expr{} + } + return e.elements } -type navigableMapImpl struct { - *navigableExprImpl +func (e *baseListExpr) IsOptional(index int32) bool { + _, found := e.optIndexMap[index] + return found } -func (m navigableMapImpl) Entries() []NavigableEntry { - mapExpr := m.ToExpr().GetStructExpr() - entries := make([]NavigableEntry, len(mapExpr.GetEntries())) - for i, e := range mapExpr.GetEntries() { - entries[i] = navigableEntryImpl{ - key: 
m.createChild(e.GetMapKey()), - val: m.createChild(e.GetValue()), - isOpt: e.GetOptionalEntry(), - } +func (e *baseListExpr) OptionalIndices() []int32 { + if e == nil { + return []int32{} } - return entries + return e.optIndices } -func (m navigableMapImpl) Size() int { - return len(m.ToExpr().GetStructExpr().GetEntries()) +func (e *baseListExpr) Size() int { + return len(e.Elements()) } -type navigableEntryImpl struct { - key NavigableExpr - val NavigableExpr - isOpt bool +func (e *baseListExpr) renumberIDs(idGen IDGenerator) { + for _, elem := range e.Elements() { + elem.RenumberIDs(idGen) + } } -func (e navigableEntryImpl) Key() NavigableExpr { - return e.key -} +func (*baseListExpr) isExpr() {} -func (e navigableEntryImpl) Value() NavigableExpr { - return e.val +type baseMapExpr struct { + entries []EntryExpr } -func (e navigableEntryImpl) IsOptional() bool { - return e.isOpt +func (*baseMapExpr) Kind() ExprKind { + return MapKind } -func (e navigableEntryImpl) isNavigable() {} +func (e *baseMapExpr) Entries() []EntryExpr { + if e == nil { + return []EntryExpr{} + } + return e.entries +} -type navigableSelectImpl struct { - *navigableExprImpl +func (e *baseMapExpr) Size() int { + return len(e.Entries()) } -func (sel navigableSelectImpl) FieldName() string { - return sel.ToExpr().GetSelectExpr().GetField() +func (e *baseMapExpr) renumberIDs(idGen IDGenerator) { + for _, entry := range e.Entries() { + entry.RenumberIDs(idGen) + } } -func (sel navigableSelectImpl) IsTestOnly() bool { - return sel.ToExpr().GetSelectExpr().GetTestOnly() +func (*baseMapExpr) isExpr() {} + +type baseSelectExpr struct { + operand Expr + field string + testOnly bool } -func (sel navigableSelectImpl) Operand() NavigableExpr { - return sel.createChild(sel.ToExpr().GetSelectExpr().GetOperand()) +func (*baseSelectExpr) Kind() ExprKind { + return SelectKind } -type navigableStructImpl struct { - *navigableExprImpl +func (e *baseSelectExpr) Operand() Expr { + if e == nil || e.operand == nil { + return nilExpr + } + return e.operand } -func (s navigableStructImpl) TypeName() string { - return s.ToExpr().GetStructExpr().GetMessageName() +func (e *baseSelectExpr) FieldName() string { + if e == nil { + return "" + } + return e.field } -func (s navigableStructImpl) Fields() []NavigableField { - fieldInits := s.ToExpr().GetStructExpr().GetEntries() - fields := make([]NavigableField, len(fieldInits)) - for i, f := range fieldInits { - fields[i] = navigableFieldImpl{ - name: f.GetFieldKey(), - val: s.createChild(f.GetValue()), - isOpt: f.GetOptionalEntry(), - } +func (e *baseSelectExpr) IsTestOnly() bool { + if e == nil { + return false } - return fields + return e.testOnly } -type navigableFieldImpl struct { - name string - val NavigableExpr - isOpt bool +func (e *baseSelectExpr) renumberIDs(idGen IDGenerator) { + e.Operand().RenumberIDs(idGen) } -func (f navigableFieldImpl) FieldName() string { - return f.name +func (*baseSelectExpr) isExpr() {} + +type baseStructExpr struct { + typeName string + fields []EntryExpr } -func (f navigableFieldImpl) Value() NavigableExpr { - return f.val +func (*baseStructExpr) Kind() ExprKind { + return StructKind } -func (f navigableFieldImpl) IsOptional() bool { - return f.isOpt +func (e *baseStructExpr) TypeName() string { + if e == nil { + return "" + } + return e.typeName } -func (f navigableFieldImpl) isNavigable() {} +func (e *baseStructExpr) Fields() []EntryExpr { + if e == nil { + return []EntryExpr{} + } + return e.fields +} -func kindOf(expr *exprpb.Expr) (ExprKind, childFactory) { 
- switch expr.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - return LiteralKind, noopFactory - case *exprpb.Expr_IdentExpr: - return IdentKind, noopFactory - case *exprpb.Expr_SelectExpr: - return SelectKind, selectFactory - case *exprpb.Expr_CallExpr: - return CallKind, callArgFactory - case *exprpb.Expr_ListExpr: - return ListKind, listElemFactory - case *exprpb.Expr_StructExpr: - if expr.GetStructExpr().GetMessageName() != "" { - return StructKind, structEntryFactory - } - return MapKind, mapEntryFactory - case *exprpb.Expr_ComprehensionExpr: - return ComprehensionKind, comprehensionFactory - default: - return UnspecifiedKind, noopFactory +func (e *baseStructExpr) renumberIDs(idGen IDGenerator) { + for _, f := range e.Fields() { + f.RenumberIDs(idGen) } } -type childFactory func(*navigableExprImpl) []NavigableExpr +func (*baseStructExpr) isExpr() {} + +type entryExprKindCase interface { + Kind() EntryExprKind + + renumberIDs(IDGenerator) + + isEntryExpr() +} + +var _ EntryExpr = &entryExpr{} + +type entryExpr struct { + id int64 + entryExprKindCase +} -func noopFactory(*navigableExprImpl) []NavigableExpr { - return nil +func (e *entryExpr) ID() int64 { + return e.id } -func selectFactory(nav *navigableExprImpl) []NavigableExpr { - return []NavigableExpr{ - nav.createChild(nav.ToExpr().GetSelectExpr().GetOperand()), +func (e *entryExpr) AsMapEntry() MapEntry { + if e.Kind() != MapEntryKind { + return nilMapEntry } + return e.entryExprKindCase.(MapEntry) } -func callArgFactory(nav *navigableExprImpl) []NavigableExpr { - call := nav.ToExpr().GetCallExpr() - argCount := len(call.GetArgs()) - if call.GetTarget() != nil { - argCount++ +func (e *entryExpr) AsStructField() StructField { + if e.Kind() != StructFieldKind { + return nilStructField } - navExprs := make([]NavigableExpr, argCount) - i := 0 - if call.GetTarget() != nil { - navExprs[i] = nav.createChild(call.GetTarget()) - i++ + return e.entryExprKindCase.(StructField) +} + +func (e *entryExpr) RenumberIDs(idGen IDGenerator) { + e.id = idGen(e.id) + e.entryExprKindCase.renumberIDs(idGen) +} + +type baseMapEntry struct { + key Expr + value Expr + isOptional bool +} + +func (e *baseMapEntry) Kind() EntryExprKind { + return MapEntryKind +} + +func (e *baseMapEntry) Key() Expr { + if e == nil { + return nilExpr } - for _, arg := range call.GetArgs() { - navExprs[i] = nav.createChild(arg) - i++ + return e.key +} + +func (e *baseMapEntry) Value() Expr { + if e == nil { + return nilExpr } - return navExprs + return e.value } -func listElemFactory(nav *navigableExprImpl) []NavigableExpr { - l := nav.ToExpr().GetListExpr() - navExprs := make([]NavigableExpr, len(l.GetElements())) - for i, e := range l.GetElements() { - navExprs[i] = nav.createChild(e) +func (e *baseMapEntry) IsOptional() bool { + if e == nil { + return false } - return navExprs + return e.isOptional } -func structEntryFactory(nav *navigableExprImpl) []NavigableExpr { - s := nav.ToExpr().GetStructExpr() - entries := make([]NavigableExpr, len(s.GetEntries())) - for i, e := range s.GetEntries() { +func (e *baseMapEntry) renumberIDs(idGen IDGenerator) { + e.Key().RenumberIDs(idGen) + e.Value().RenumberIDs(idGen) +} + +func (*baseMapEntry) isEntryExpr() {} + +type baseStructField struct { + field string + value Expr + isOptional bool +} + +func (f *baseStructField) Kind() EntryExprKind { + return StructFieldKind +} - entries[i] = nav.createChild(e.GetValue()) +func (f *baseStructField) Name() string { + if f == nil { + return "" } - return entries + return f.field } -func 
mapEntryFactory(nav *navigableExprImpl) []NavigableExpr { - s := nav.ToExpr().GetStructExpr() - entries := make([]NavigableExpr, len(s.GetEntries())*2) - j := 0 - for _, e := range s.GetEntries() { - entries[j] = nav.createChild(e.GetMapKey()) - entries[j+1] = nav.createChild(e.GetValue()) - j += 2 +func (f *baseStructField) Value() Expr { + if f == nil { + return nilExpr } - return entries + return f.value } -func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr { - compre := nav.ToExpr().GetComprehensionExpr() - return []NavigableExpr{ - nav.createChild(compre.GetIterRange()), - nav.createChild(compre.GetAccuInit()), - nav.createChild(compre.GetLoopCondition()), - nav.createChild(compre.GetLoopStep()), - nav.createChild(compre.GetResult()), +func (f *baseStructField) IsOptional() bool { + if f == nil { + return false } + return f.isOptional } + +func (f *baseStructField) renumberIDs(idGen IDGenerator) { + f.Value().RenumberIDs(idGen) +} + +func (*baseStructField) isEntryExpr() {} + +var ( + nilExpr *expr = nil + nilCall *baseCallExpr = nil + nilCompre *baseComprehensionExpr = nil + nilList *baseListExpr = nil + nilMap *baseMapExpr = nil + nilMapEntry *baseMapEntry = nil + nilSel *baseSelectExpr = nil + nilStruct *baseStructExpr = nil + nilStructField *baseStructField = nil +) diff --git a/vendor/github.com/google/cel-go/common/ast/factory.go b/vendor/github.com/google/cel-go/common/ast/factory.go new file mode 100644 index 0000000000..b7f36e72a4 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/factory.go @@ -0,0 +1,303 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "github.com/google/cel-go/common/types/ref" + +// ExprFactory interfaces defines a set of methods necessary for building native expression values. +type ExprFactory interface { + // CopyExpr creates a deep copy of the input Expr value. + CopyExpr(Expr) Expr + + // CopyEntryExpr creates a deep copy of the input EntryExpr value. + CopyEntryExpr(EntryExpr) EntryExpr + + // NewCall creates an Expr value representing a global function call. + NewCall(id int64, function string, args ...Expr) Expr + + // NewComprehension creates an Expr value representing a comprehension over a value range. + NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr + + // NewMemberCall creates an Expr value representing a member function call. + NewMemberCall(id int64, function string, receiver Expr, args ...Expr) Expr + + // NewIdent creates an Expr value representing an identifier. + NewIdent(id int64, name string) Expr + + // NewAccuIdent creates an Expr value representing an accumulator identifier within a + //comprehension. + NewAccuIdent(id int64) Expr + + // NewLiteral creates an Expr value representing a literal value, such as a string or integer. + NewLiteral(id int64, value ref.Val) Expr + + // NewList creates an Expr value representing a list literal expression with optional indices. 
+ // + // Optional indicies will typically be empty unless the CEL optional types are enabled. + NewList(id int64, elems []Expr, optIndices []int32) Expr + + // NewMap creates an Expr value representing a map literal expression + NewMap(id int64, entries []EntryExpr) Expr + + // NewMapEntry creates a MapEntry with a given key, value, and a flag indicating whether + // the key is optionally set. + NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr + + // NewPresenceTest creates an Expr representing a field presence test on an operand expression. + NewPresenceTest(id int64, operand Expr, field string) Expr + + // NewSelect creates an Expr representing a field selection on an operand expression. + NewSelect(id int64, operand Expr, field string) Expr + + // NewStruct creates an Expr value representing a struct literal with a given type name and a + // set of field initializers. + NewStruct(id int64, typeName string, fields []EntryExpr) Expr + + // NewStructField creates a StructField with a given field name, value, and a flag indicating + // whether the field is optionally set. + NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr + + // NewUnspecifiedExpr creates an empty expression node. + NewUnspecifiedExpr(id int64) Expr + + isExprFactory() +} + +type baseExprFactory struct{} + +// NewExprFactory creates an ExprFactory instance. +func NewExprFactory() ExprFactory { + return &baseExprFactory{} +} + +func (fac *baseExprFactory) NewCall(id int64, function string, args ...Expr) Expr { + if len(args) == 0 { + args = []Expr{} + } + return fac.newExpr( + id, + &baseCallExpr{ + function: function, + target: nilExpr, + args: args, + isMember: false, + }) +} + +func (fac *baseExprFactory) NewMemberCall(id int64, function string, target Expr, args ...Expr) Expr { + if len(args) == 0 { + args = []Expr{} + } + return fac.newExpr( + id, + &baseCallExpr{ + function: function, + target: target, + args: args, + isMember: true, + }) +} + +func (fac *baseExprFactory) NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { + return fac.newExpr( + id, + &baseComprehensionExpr{ + iterRange: iterRange, + iterVar: iterVar, + accuVar: accuVar, + accuInit: accuInit, + loopCond: loopCond, + loopStep: loopStep, + result: result, + }) +} + +func (fac *baseExprFactory) NewIdent(id int64, name string) Expr { + return fac.newExpr(id, baseIdentExpr(name)) +} + +func (fac *baseExprFactory) NewAccuIdent(id int64) Expr { + return fac.NewIdent(id, "__result__") +} + +func (fac *baseExprFactory) NewLiteral(id int64, value ref.Val) Expr { + return fac.newExpr(id, &baseLiteral{Val: value}) +} + +func (fac *baseExprFactory) NewList(id int64, elems []Expr, optIndices []int32) Expr { + optIndexMap := make(map[int32]struct{}, len(optIndices)) + for _, idx := range optIndices { + optIndexMap[idx] = struct{}{} + } + return fac.newExpr(id, + &baseListExpr{ + elements: elems, + optIndices: optIndices, + optIndexMap: optIndexMap, + }) +} + +func (fac *baseExprFactory) NewMap(id int64, entries []EntryExpr) Expr { + return fac.newExpr(id, &baseMapExpr{entries: entries}) +} + +func (fac *baseExprFactory) NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr { + return fac.newEntryExpr( + id, + &baseMapEntry{ + key: key, + value: value, + isOptional: isOptional, + }) +} + +func (fac *baseExprFactory) NewPresenceTest(id int64, operand Expr, field string) Expr { + return fac.newExpr( + id, + &baseSelectExpr{ + operand: operand, + 
field: field, + testOnly: true, + }) +} + +func (fac *baseExprFactory) NewSelect(id int64, operand Expr, field string) Expr { + return fac.newExpr( + id, + &baseSelectExpr{ + operand: operand, + field: field, + }) +} + +func (fac *baseExprFactory) NewStruct(id int64, typeName string, fields []EntryExpr) Expr { + return fac.newExpr( + id, + &baseStructExpr{ + typeName: typeName, + fields: fields, + }) +} + +func (fac *baseExprFactory) NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr { + return fac.newEntryExpr( + id, + &baseStructField{ + field: field, + value: value, + isOptional: isOptional, + }) +} + +func (fac *baseExprFactory) NewUnspecifiedExpr(id int64) Expr { + return fac.newExpr(id, nil) +} + +func (fac *baseExprFactory) CopyExpr(e Expr) Expr { + // unwrap navigable expressions to avoid unnecessary allocations during copying. + if nav, ok := e.(*navigableExprImpl); ok { + e = nav.Expr + } + switch e.Kind() { + case CallKind: + c := e.AsCall() + argsCopy := make([]Expr, len(c.Args())) + for i, arg := range c.Args() { + argsCopy[i] = fac.CopyExpr(arg) + } + if !c.IsMemberFunction() { + return fac.NewCall(e.ID(), c.FunctionName(), argsCopy...) + } + return fac.NewMemberCall(e.ID(), c.FunctionName(), fac.CopyExpr(c.Target()), argsCopy...) + case ComprehensionKind: + compre := e.AsComprehension() + return fac.NewComprehension(e.ID(), + fac.CopyExpr(compre.IterRange()), + compre.IterVar(), + compre.AccuVar(), + fac.CopyExpr(compre.AccuInit()), + fac.CopyExpr(compre.LoopCondition()), + fac.CopyExpr(compre.LoopStep()), + fac.CopyExpr(compre.Result())) + case IdentKind: + return fac.NewIdent(e.ID(), e.AsIdent()) + case ListKind: + l := e.AsList() + elemsCopy := make([]Expr, l.Size()) + for i, elem := range l.Elements() { + elemsCopy[i] = fac.CopyExpr(elem) + } + return fac.NewList(e.ID(), elemsCopy, l.OptionalIndices()) + case LiteralKind: + return fac.NewLiteral(e.ID(), e.AsLiteral()) + case MapKind: + m := e.AsMap() + entriesCopy := make([]EntryExpr, m.Size()) + for i, entry := range m.Entries() { + entriesCopy[i] = fac.CopyEntryExpr(entry) + } + return fac.NewMap(e.ID(), entriesCopy) + case SelectKind: + s := e.AsSelect() + if s.IsTestOnly() { + return fac.NewPresenceTest(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName()) + } + return fac.NewSelect(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName()) + case StructKind: + s := e.AsStruct() + fieldsCopy := make([]EntryExpr, len(s.Fields())) + for i, field := range s.Fields() { + fieldsCopy[i] = fac.CopyEntryExpr(field) + } + return fac.NewStruct(e.ID(), s.TypeName(), fieldsCopy) + default: + return fac.NewUnspecifiedExpr(e.ID()) + } +} + +func (fac *baseExprFactory) CopyEntryExpr(e EntryExpr) EntryExpr { + switch e.Kind() { + case MapEntryKind: + entry := e.AsMapEntry() + return fac.NewMapEntry(e.ID(), + fac.CopyExpr(entry.Key()), fac.CopyExpr(entry.Value()), entry.IsOptional()) + case StructFieldKind: + field := e.AsStructField() + return fac.NewStructField(e.ID(), + field.Name(), fac.CopyExpr(field.Value()), field.IsOptional()) + default: + return fac.newEntryExpr(e.ID(), nil) + } +} + +func (*baseExprFactory) isExprFactory() {} + +func (fac *baseExprFactory) newExpr(id int64, e exprKindCase) Expr { + return &expr{ + id: id, + exprKindCase: e, + } +} + +func (fac *baseExprFactory) newEntryExpr(id int64, e entryExprKindCase) EntryExpr { + return &entryExpr{ + id: id, + entryExprKindCase: e, + } +} + +var ( + defaultFactory = &baseExprFactory{} +) diff --git a/vendor/github.com/google/cel-go/common/ast/navigable.go 
b/vendor/github.com/google/cel-go/common/ast/navigable.go new file mode 100644 index 0000000000..f5ddf6aac6 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/navigable.go @@ -0,0 +1,652 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// NavigableExpr represents the base navigable expression value with methods to inspect the +// parent and child expressions. +type NavigableExpr interface { + Expr + + // Type of the expression. + // + // If the expression is type-checked, the type check metadata is returned. If the expression + // has not been type-checked, the types.DynType value is returned. + Type() *types.Type + + // Parent returns the parent expression node, if one exists. + Parent() (NavigableExpr, bool) + + // Children returns a list of child expression nodes. + Children() []NavigableExpr + + // Depth indicates the depth in the expression tree. + // + // The root expression has depth 0. + Depth() int +} + +// NavigateAST converts an AST to a NavigableExpr +func NavigateAST(ast *AST) NavigableExpr { + return NavigateExpr(ast, ast.Expr()) +} + +// NavigateExpr creates a NavigableExpr whose type information is backed by the input AST. +// +// If the expression is already a NavigableExpr, the parent and depth information will be +// propagated on the new NavigableExpr value; otherwise, the expr value will be treated +// as though it is the root of the expression graph with a depth of 0. +func NavigateExpr(ast *AST, expr Expr) NavigableExpr { + depth := 0 + var parent NavigableExpr = nil + if nav, ok := expr.(NavigableExpr); ok { + depth = nav.Depth() + parent, _ = nav.Parent() + } + return newNavigableExpr(ast, parent, expr, depth) +} + +// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match. +// +// This function type should be use with the `Match` and `MatchList` calls. +type ExprMatcher func(NavigableExpr) bool + +// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr +// is comprised of all constant values, such as a simple literal or even list and map literal. +func ConstantValueMatcher() ExprMatcher { + return matchIsConstantValue +} + +// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches +// the specified `kind`. +func KindMatcher(kind ExprKind) ExprMatcher { + return func(e NavigableExpr) bool { + return e.Kind() == kind + } +} + +// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose +// function name is equal to `funcName`. +func FunctionMatcher(funcName string) ExprMatcher { + return func(e NavigableExpr) bool { + if e.Kind() != CallKind { + return false + } + return e.AsCall().FunctionName() == funcName + } +} + +// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list. 
+// +// Such a result would work well with subsequent MatchList calls. +func AllMatcher() ExprMatcher { + return func(NavigableExpr) bool { + return true + } +} + +// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values +// matching the input criteria in post-order (bottom up). +func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr { + matches := []NavigableExpr{} + navVisitor := &baseVisitor{ + visitExpr: func(e Expr) { + nav := e.(NavigableExpr) + if matcher(nav) { + matches = append(matches, nav) + } + }, + } + visit(expr, navVisitor, postOrder, 0, 0) + return matches +} + +// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a +// subset of NavigableExpr values which match. +func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr { + matches := []NavigableExpr{} + navVisitor := &baseVisitor{ + visitExpr: func(e Expr) { + nav := e.(NavigableExpr) + if matcher(nav) { + matches = append(matches, nav) + } + }, + } + for _, expr := range exprs { + visit(expr, navVisitor, postOrder, 0, 1) + } + return matches +} + +// Visitor defines an object for visiting Expr and EntryExpr nodes within an expression graph. +type Visitor interface { + // VisitExpr visits the input expression. + VisitExpr(Expr) + + // VisitEntryExpr visits the input entry expression, i.e. a struct field or map entry. + VisitEntryExpr(EntryExpr) +} + +type baseVisitor struct { + visitExpr func(Expr) + visitEntryExpr func(EntryExpr) +} + +// VisitExpr visits the Expr if the internal expr visitor has been configured. +func (v *baseVisitor) VisitExpr(e Expr) { + if v.visitExpr != nil { + v.visitExpr(e) + } +} + +// VisitEntryExpr visits the entry if the internal expr entry visitor has been configured. +func (v *baseVisitor) VisitEntryExpr(e EntryExpr) { + if v.visitEntryExpr != nil { + v.visitEntryExpr(e) + } +} + +// NewExprVisitor creates a visitor which only visits expression nodes. +func NewExprVisitor(v func(Expr)) Visitor { + return &baseVisitor{ + visitExpr: v, + visitEntryExpr: nil, + } +} + +// PostOrderVisit walks the expression graph and calls the visitor in post-order (bottom-up). +func PostOrderVisit(expr Expr, visitor Visitor) { + visit(expr, visitor, postOrder, 0, 0) +} + +// PreOrderVisit walks the expression graph and calls the visitor in pre-order (top-down). +func PreOrderVisit(expr Expr, visitor Visitor) { + visit(expr, visitor, preOrder, 0, 0) +} + +type visitOrder int + +const ( + preOrder = iota + 1 + postOrder +) + +// TODO: consider exposing a way to configure a limit for the max visit depth. +// It's possible that we could want to configure this on the NewExprVisitor() +// and through MatchDescendents() / MaxID(). 
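// A minimal usage sketch of the matcher and visitor helpers above (illustration
// only): `checked` stands in for an *AST built elsewhere, and "has" is an
// arbitrary example function name. It collects every descendant call to "has"
// and then records all expression IDs in post-order.
func exampleMatchAndVisit(checked *AST) ([]NavigableExpr, []int64) {
	root := NavigateAST(checked)
	// Post-order list of all descendant calls named "has".
	hasCalls := MatchDescendants(root, FunctionMatcher("has"))

	// Record every node ID bottom-up using the expression-only visitor.
	ids := []int64{}
	PostOrderVisit(root, NewExprVisitor(func(e Expr) {
		ids = append(ids, e.ID())
	}))
	return hasCalls, ids
}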
+func visit(expr Expr, visitor Visitor, order visitOrder, depth, maxDepth int) { + if maxDepth > 0 && depth == maxDepth { + return + } + if order == preOrder { + visitor.VisitExpr(expr) + } + switch expr.Kind() { + case CallKind: + c := expr.AsCall() + if c.IsMemberFunction() { + visit(c.Target(), visitor, order, depth+1, maxDepth) + } + for _, arg := range c.Args() { + visit(arg, visitor, order, depth+1, maxDepth) + } + case ComprehensionKind: + c := expr.AsComprehension() + visit(c.IterRange(), visitor, order, depth+1, maxDepth) + visit(c.AccuInit(), visitor, order, depth+1, maxDepth) + visit(c.LoopCondition(), visitor, order, depth+1, maxDepth) + visit(c.LoopStep(), visitor, order, depth+1, maxDepth) + visit(c.Result(), visitor, order, depth+1, maxDepth) + case ListKind: + l := expr.AsList() + for _, elem := range l.Elements() { + visit(elem, visitor, order, depth+1, maxDepth) + } + case MapKind: + m := expr.AsMap() + for _, e := range m.Entries() { + if order == preOrder { + visitor.VisitEntryExpr(e) + } + entry := e.AsMapEntry() + visit(entry.Key(), visitor, order, depth+1, maxDepth) + visit(entry.Value(), visitor, order, depth+1, maxDepth) + if order == postOrder { + visitor.VisitEntryExpr(e) + } + } + case SelectKind: + visit(expr.AsSelect().Operand(), visitor, order, depth+1, maxDepth) + case StructKind: + s := expr.AsStruct() + for _, f := range s.Fields() { + visitor.VisitEntryExpr(f) + visit(f.AsStructField().Value(), visitor, order, depth+1, maxDepth) + } + } + if order == postOrder { + visitor.VisitExpr(expr) + } +} + +func matchIsConstantValue(e NavigableExpr) bool { + if e.Kind() == LiteralKind { + return true + } + if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind { + for _, child := range e.Children() { + if !matchIsConstantValue(child) { + return false + } + } + return true + } + return false +} + +func newNavigableExpr(ast *AST, parent NavigableExpr, expr Expr, depth int) NavigableExpr { + // Reduce navigable expression nesting by unwrapping the embedded Expr value. 
+ if nav, ok := expr.(*navigableExprImpl); ok { + expr = nav.Expr + } + nav := &navigableExprImpl{ + Expr: expr, + depth: depth, + ast: ast, + parent: parent, + createChildren: getChildFactory(expr), + } + return nav +} + +type navigableExprImpl struct { + Expr + depth int + ast *AST + parent NavigableExpr + createChildren childFactory +} + +func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) { + if nav.parent != nil { + return nav.parent, true + } + return nil, false +} + +func (nav *navigableExprImpl) ID() int64 { + return nav.Expr.ID() +} + +func (nav *navigableExprImpl) Kind() ExprKind { + return nav.Expr.Kind() +} + +func (nav *navigableExprImpl) Type() *types.Type { + return nav.ast.GetType(nav.ID()) +} + +func (nav *navigableExprImpl) Children() []NavigableExpr { + return nav.createChildren(nav) +} + +func (nav *navigableExprImpl) Depth() int { + return nav.depth +} + +func (nav *navigableExprImpl) AsCall() CallExpr { + return navigableCallImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsComprehension() ComprehensionExpr { + return navigableComprehensionImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsIdent() string { + return nav.Expr.AsIdent() +} + +func (nav *navigableExprImpl) AsList() ListExpr { + return navigableListImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsLiteral() ref.Val { + return nav.Expr.AsLiteral() +} + +func (nav *navigableExprImpl) AsMap() MapExpr { + return navigableMapImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsSelect() SelectExpr { + return navigableSelectImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsStruct() StructExpr { + return navigableStructImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) createChild(e Expr) NavigableExpr { + return newNavigableExpr(nav.ast, nav, e, nav.depth+1) +} + +func (nav *navigableExprImpl) isExpr() {} + +type navigableCallImpl struct { + *navigableExprImpl +} + +func (call navigableCallImpl) FunctionName() string { + return call.Expr.AsCall().FunctionName() +} + +func (call navigableCallImpl) IsMemberFunction() bool { + return call.Expr.AsCall().IsMemberFunction() +} + +func (call navigableCallImpl) Target() Expr { + t := call.Expr.AsCall().Target() + if t != nil { + return call.createChild(t) + } + return nil +} + +func (call navigableCallImpl) Args() []Expr { + args := call.Expr.AsCall().Args() + navArgs := make([]Expr, len(args)) + for i, a := range args { + navArgs[i] = call.createChild(a) + } + return navArgs +} + +type navigableComprehensionImpl struct { + *navigableExprImpl +} + +func (comp navigableComprehensionImpl) IterRange() Expr { + return comp.createChild(comp.Expr.AsComprehension().IterRange()) +} + +func (comp navigableComprehensionImpl) IterVar() string { + return comp.Expr.AsComprehension().IterVar() +} + +func (comp navigableComprehensionImpl) AccuVar() string { + return comp.Expr.AsComprehension().AccuVar() +} + +func (comp navigableComprehensionImpl) AccuInit() Expr { + return comp.createChild(comp.Expr.AsComprehension().AccuInit()) +} + +func (comp navigableComprehensionImpl) LoopCondition() Expr { + return comp.createChild(comp.Expr.AsComprehension().LoopCondition()) +} + +func (comp navigableComprehensionImpl) LoopStep() Expr { + return comp.createChild(comp.Expr.AsComprehension().LoopStep()) +} + +func (comp navigableComprehensionImpl) Result() Expr { + return comp.createChild(comp.Expr.AsComprehension().Result()) +} + +type navigableListImpl struct { + *navigableExprImpl +} + +func 
(l navigableListImpl) Elements() []Expr { + pbElems := l.Expr.AsList().Elements() + elems := make([]Expr, len(pbElems)) + for i := 0; i < len(pbElems); i++ { + elems[i] = l.createChild(pbElems[i]) + } + return elems +} + +func (l navigableListImpl) IsOptional(index int32) bool { + return l.Expr.AsList().IsOptional(index) +} + +func (l navigableListImpl) OptionalIndices() []int32 { + return l.Expr.AsList().OptionalIndices() +} + +func (l navigableListImpl) Size() int { + return l.Expr.AsList().Size() +} + +type navigableMapImpl struct { + *navigableExprImpl +} + +func (m navigableMapImpl) Entries() []EntryExpr { + mapExpr := m.Expr.AsMap() + entries := make([]EntryExpr, len(mapExpr.Entries())) + for i, e := range mapExpr.Entries() { + entry := e.AsMapEntry() + entries[i] = &entryExpr{ + id: e.ID(), + entryExprKindCase: navigableEntryImpl{ + key: m.createChild(entry.Key()), + val: m.createChild(entry.Value()), + isOpt: entry.IsOptional(), + }, + } + } + return entries +} + +func (m navigableMapImpl) Size() int { + return m.Expr.AsMap().Size() +} + +type navigableEntryImpl struct { + key NavigableExpr + val NavigableExpr + isOpt bool +} + +func (e navigableEntryImpl) Kind() EntryExprKind { + return MapEntryKind +} + +func (e navigableEntryImpl) Key() Expr { + return e.key +} + +func (e navigableEntryImpl) Value() Expr { + return e.val +} + +func (e navigableEntryImpl) IsOptional() bool { + return e.isOpt +} + +func (e navigableEntryImpl) renumberIDs(IDGenerator) {} + +func (e navigableEntryImpl) isEntryExpr() {} + +type navigableSelectImpl struct { + *navigableExprImpl +} + +func (sel navigableSelectImpl) FieldName() string { + return sel.Expr.AsSelect().FieldName() +} + +func (sel navigableSelectImpl) IsTestOnly() bool { + return sel.Expr.AsSelect().IsTestOnly() +} + +func (sel navigableSelectImpl) Operand() Expr { + return sel.createChild(sel.Expr.AsSelect().Operand()) +} + +type navigableStructImpl struct { + *navigableExprImpl +} + +func (s navigableStructImpl) TypeName() string { + return s.Expr.AsStruct().TypeName() +} + +func (s navigableStructImpl) Fields() []EntryExpr { + fieldInits := s.Expr.AsStruct().Fields() + fields := make([]EntryExpr, len(fieldInits)) + for i, f := range fieldInits { + field := f.AsStructField() + fields[i] = &entryExpr{ + id: f.ID(), + entryExprKindCase: navigableFieldImpl{ + name: field.Name(), + val: s.createChild(field.Value()), + isOpt: field.IsOptional(), + }, + } + } + return fields +} + +type navigableFieldImpl struct { + name string + val NavigableExpr + isOpt bool +} + +func (f navigableFieldImpl) Kind() EntryExprKind { + return StructFieldKind +} + +func (f navigableFieldImpl) Name() string { + return f.name +} + +func (f navigableFieldImpl) Value() Expr { + return f.val +} + +func (f navigableFieldImpl) IsOptional() bool { + return f.isOpt +} + +func (f navigableFieldImpl) renumberIDs(IDGenerator) {} + +func (f navigableFieldImpl) isEntryExpr() {} + +func getChildFactory(expr Expr) childFactory { + if expr == nil { + return noopFactory + } + switch expr.Kind() { + case LiteralKind: + return noopFactory + case IdentKind: + return noopFactory + case SelectKind: + return selectFactory + case CallKind: + return callArgFactory + case ListKind: + return listElemFactory + case MapKind: + return mapEntryFactory + case StructKind: + return structEntryFactory + case ComprehensionKind: + return comprehensionFactory + default: + return noopFactory + } +} + +type childFactory func(*navigableExprImpl) []NavigableExpr + +func noopFactory(*navigableExprImpl) 
[]NavigableExpr { + return nil +} + +func selectFactory(nav *navigableExprImpl) []NavigableExpr { + return []NavigableExpr{nav.createChild(nav.AsSelect().Operand())} +} + +func callArgFactory(nav *navigableExprImpl) []NavigableExpr { + call := nav.Expr.AsCall() + argCount := len(call.Args()) + if call.IsMemberFunction() { + argCount++ + } + navExprs := make([]NavigableExpr, argCount) + i := 0 + if call.IsMemberFunction() { + navExprs[i] = nav.createChild(call.Target()) + i++ + } + for _, arg := range call.Args() { + navExprs[i] = nav.createChild(arg) + i++ + } + return navExprs +} + +func listElemFactory(nav *navigableExprImpl) []NavigableExpr { + l := nav.Expr.AsList() + navExprs := make([]NavigableExpr, len(l.Elements())) + for i, e := range l.Elements() { + navExprs[i] = nav.createChild(e) + } + return navExprs +} + +func structEntryFactory(nav *navigableExprImpl) []NavigableExpr { + s := nav.Expr.AsStruct() + entries := make([]NavigableExpr, len(s.Fields())) + for i, e := range s.Fields() { + f := e.AsStructField() + entries[i] = nav.createChild(f.Value()) + } + return entries +} + +func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr { + m := nav.Expr.AsMap() + entries := make([]NavigableExpr, len(m.Entries())*2) + j := 0 + for _, e := range m.Entries() { + mapEntry := e.AsMapEntry() + entries[j] = nav.createChild(mapEntry.Key()) + entries[j+1] = nav.createChild(mapEntry.Value()) + j += 2 + } + return entries +} + +func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr { + compre := nav.Expr.AsComprehension() + return []NavigableExpr{ + nav.createChild(compre.IterRange()), + nav.createChild(compre.AccuInit()), + nav.createChild(compre.LoopCondition()), + nav.createChild(compre.LoopStep()), + nav.createChild(compre.Result()), + } +} diff --git a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel index 3f3f078871..81197f0641 100644 --- a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel @@ -12,7 +12,7 @@ go_library( ], importpath = "github.com/google/cel-go/common/containers", deps = [ - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "//common/ast:go_default_library", ], ) @@ -26,6 +26,6 @@ go_test( ":go_default_library", ], deps = [ - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "//common/ast:go_default_library", ], ) diff --git a/vendor/github.com/google/cel-go/common/containers/container.go b/vendor/github.com/google/cel-go/common/containers/container.go index d46698d3cd..52153d4cd1 100644 --- a/vendor/github.com/google/cel-go/common/containers/container.go +++ b/vendor/github.com/google/cel-go/common/containers/container.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/ast" ) var ( @@ -297,19 +297,19 @@ func Name(name string) ContainerOption { // ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean // 'found' value that indicates if the conversion is successful. 
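// A minimal usage sketch of the ast-based ToQualifiedName below, assuming the
// expression is built with the common/ast ExprFactory; the IDs 1-3 are
// arbitrary. Nested selects for `a.b.c` convert back to ("a.b.c", true),
// whereas a test-only select (presence test) yields ("", false).
func exampleQualifiedName() (string, bool) {
	fac := ast.NewExprFactory()
	abc := fac.NewSelect(3, fac.NewSelect(2, fac.NewIdent(1, "a"), "b"), "c")
	return ToQualifiedName(abc) // "a.b.c", true
}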
-func ToQualifiedName(e *exprpb.Expr) (string, bool) { - switch e.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - id := e.GetIdentExpr() - return id.GetName(), true - case *exprpb.Expr_SelectExpr: - sel := e.GetSelectExpr() +func ToQualifiedName(e ast.Expr) (string, bool) { + switch e.Kind() { + case ast.IdentKind: + id := e.AsIdent() + return id, true + case ast.SelectKind: + sel := e.AsSelect() // Test only expressions are not valid as qualified names. - if sel.GetTestOnly() { + if sel.IsTestOnly() { return "", false } - if qual, found := ToQualifiedName(sel.GetOperand()); found { - return qual + "." + sel.GetField(), true + if qual, found := ToQualifiedName(sel.Operand()); found { + return qual + "." + sel.FieldName(), true } } return "", false diff --git a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel index 1f029839c7..724ed34045 100644 --- a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel @@ -13,6 +13,8 @@ go_library( importpath = "github.com/google/cel-go/common/debug", deps = [ "//common:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "//common/ast:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", ], ) diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go index 5dab156ef3..e4c01ac6ed 100644 --- a/vendor/github.com/google/cel-go/common/debug/debug.go +++ b/vendor/github.com/google/cel-go/common/debug/debug.go @@ -22,7 +22,9 @@ import ( "strconv" "strings" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" ) // Adorner returns debug metadata that will be tacked on to the string @@ -38,7 +40,7 @@ type Writer interface { // Buffer pushes an expression into an internal queue of expressions to // write to a string. - Buffer(e *exprpb.Expr) + Buffer(e ast.Expr) } type emptyDebugAdorner struct { @@ -51,12 +53,12 @@ func (a *emptyDebugAdorner) GetMetadata(e any) string { } // ToDebugString gives the unadorned string representation of the Expr. -func ToDebugString(e *exprpb.Expr) string { +func ToDebugString(e ast.Expr) string { return ToAdornedDebugString(e, emptyAdorner) } // ToAdornedDebugString gives the adorned string representation of the Expr. 
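// A minimal usage sketch of the ast-based debug writer, assuming an expression
// built with the common/ast factory (the IDs and the "size"/"x" names are
// arbitrary); ToDebugString renders it with the empty adorner defined above.
func exampleDebugString() string {
	fac := ast.NewExprFactory()
	e := fac.NewCall(1, "size", fac.NewIdent(2, "x"))
	return ToDebugString(e)
}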
-func ToAdornedDebugString(e *exprpb.Expr, adorner Adorner) string { +func ToAdornedDebugString(e ast.Expr, adorner Adorner) string { w := newDebugWriter(adorner) w.Buffer(e) return w.String() @@ -78,49 +80,51 @@ func newDebugWriter(a Adorner) *debugWriter { } } -func (w *debugWriter) Buffer(e *exprpb.Expr) { +func (w *debugWriter) Buffer(e ast.Expr) { if e == nil { return } - switch e.ExprKind.(type) { - case *exprpb.Expr_ConstExpr: - w.append(formatLiteral(e.GetConstExpr())) - case *exprpb.Expr_IdentExpr: - w.append(e.GetIdentExpr().Name) - case *exprpb.Expr_SelectExpr: - w.appendSelect(e.GetSelectExpr()) - case *exprpb.Expr_CallExpr: - w.appendCall(e.GetCallExpr()) - case *exprpb.Expr_ListExpr: - w.appendList(e.GetListExpr()) - case *exprpb.Expr_StructExpr: - w.appendStruct(e.GetStructExpr()) - case *exprpb.Expr_ComprehensionExpr: - w.appendComprehension(e.GetComprehensionExpr()) + switch e.Kind() { + case ast.LiteralKind: + w.append(formatLiteral(e.AsLiteral())) + case ast.IdentKind: + w.append(e.AsIdent()) + case ast.SelectKind: + w.appendSelect(e.AsSelect()) + case ast.CallKind: + w.appendCall(e.AsCall()) + case ast.ListKind: + w.appendList(e.AsList()) + case ast.MapKind: + w.appendMap(e.AsMap()) + case ast.StructKind: + w.appendStruct(e.AsStruct()) + case ast.ComprehensionKind: + w.appendComprehension(e.AsComprehension()) } w.adorn(e) } -func (w *debugWriter) appendSelect(sel *exprpb.Expr_Select) { - w.Buffer(sel.GetOperand()) +func (w *debugWriter) appendSelect(sel ast.SelectExpr) { + w.Buffer(sel.Operand()) w.append(".") - w.append(sel.GetField()) - if sel.TestOnly { + w.append(sel.FieldName()) + if sel.IsTestOnly() { w.append("~test-only~") } } -func (w *debugWriter) appendCall(call *exprpb.Expr_Call) { - if call.Target != nil { - w.Buffer(call.GetTarget()) +func (w *debugWriter) appendCall(call ast.CallExpr) { + if call.IsMemberFunction() { + w.Buffer(call.Target()) w.append(".") } - w.append(call.GetFunction()) + w.append(call.FunctionName()) w.append("(") - if len(call.GetArgs()) > 0 { + if len(call.Args()) > 0 { w.addIndent() w.appendLine() - for i, arg := range call.GetArgs() { + for i, arg := range call.Args() { if i > 0 { w.append(",") w.appendLine() @@ -133,12 +137,12 @@ func (w *debugWriter) appendCall(call *exprpb.Expr_Call) { w.append(")") } -func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) { +func (w *debugWriter) appendList(list ast.ListExpr) { w.append("[") - if len(list.GetElements()) > 0 { + if len(list.Elements()) > 0 { w.appendLine() w.addIndent() - for i, elem := range list.GetElements() { + for i, elem := range list.Elements() { if i > 0 { w.append(",") w.appendLine() @@ -151,32 +155,25 @@ func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) { w.append("]") } -func (w *debugWriter) appendStruct(obj *exprpb.Expr_CreateStruct) { - if obj.MessageName != "" { - w.appendObject(obj) - } else { - w.appendMap(obj) - } -} - -func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) { - w.append(obj.GetMessageName()) +func (w *debugWriter) appendStruct(obj ast.StructExpr) { + w.append(obj.TypeName()) w.append("{") - if len(obj.GetEntries()) > 0 { + if len(obj.Fields()) > 0 { w.appendLine() w.addIndent() - for i, entry := range obj.GetEntries() { + for i, f := range obj.Fields() { + field := f.AsStructField() if i > 0 { w.append(",") w.appendLine() } - if entry.GetOptionalEntry() { + if field.IsOptional() { w.append("?") } - w.append(entry.GetFieldKey()) + w.append(field.Name()) w.append(":") - w.Buffer(entry.GetValue()) - w.adorn(entry) + 
w.Buffer(field.Value()) + w.adorn(f) } w.removeIndent() w.appendLine() @@ -184,23 +181,24 @@ func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) { w.append("}") } -func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) { +func (w *debugWriter) appendMap(m ast.MapExpr) { w.append("{") - if len(obj.GetEntries()) > 0 { + if m.Size() > 0 { w.appendLine() w.addIndent() - for i, entry := range obj.GetEntries() { + for i, e := range m.Entries() { + entry := e.AsMapEntry() if i > 0 { w.append(",") w.appendLine() } - if entry.GetOptionalEntry() { + if entry.IsOptional() { w.append("?") } - w.Buffer(entry.GetMapKey()) + w.Buffer(entry.Key()) w.append(":") - w.Buffer(entry.GetValue()) - w.adorn(entry) + w.Buffer(entry.Value()) + w.adorn(e) } w.removeIndent() w.appendLine() @@ -208,62 +206,62 @@ func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) { w.append("}") } -func (w *debugWriter) appendComprehension(comprehension *exprpb.Expr_Comprehension) { +func (w *debugWriter) appendComprehension(comprehension ast.ComprehensionExpr) { w.append("__comprehension__(") w.addIndent() w.appendLine() w.append("// Variable") w.appendLine() - w.append(comprehension.GetIterVar()) + w.append(comprehension.IterVar()) w.append(",") w.appendLine() w.append("// Target") w.appendLine() - w.Buffer(comprehension.GetIterRange()) + w.Buffer(comprehension.IterRange()) w.append(",") w.appendLine() w.append("// Accumulator") w.appendLine() - w.append(comprehension.GetAccuVar()) + w.append(comprehension.AccuVar()) w.append(",") w.appendLine() w.append("// Init") w.appendLine() - w.Buffer(comprehension.GetAccuInit()) + w.Buffer(comprehension.AccuInit()) w.append(",") w.appendLine() w.append("// LoopCondition") w.appendLine() - w.Buffer(comprehension.GetLoopCondition()) + w.Buffer(comprehension.LoopCondition()) w.append(",") w.appendLine() w.append("// LoopStep") w.appendLine() - w.Buffer(comprehension.GetLoopStep()) + w.Buffer(comprehension.LoopStep()) w.append(",") w.appendLine() w.append("// Result") w.appendLine() - w.Buffer(comprehension.GetResult()) + w.Buffer(comprehension.Result()) w.append(")") w.removeIndent() } -func formatLiteral(c *exprpb.Constant) string { - switch c.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - return fmt.Sprintf("%t", c.GetBoolValue()) - case *exprpb.Constant_BytesValue: - return fmt.Sprintf("b\"%s\"", string(c.GetBytesValue())) - case *exprpb.Constant_DoubleValue: - return fmt.Sprintf("%v", c.GetDoubleValue()) - case *exprpb.Constant_Int64Value: - return fmt.Sprintf("%d", c.GetInt64Value()) - case *exprpb.Constant_StringValue: - return strconv.Quote(c.GetStringValue()) - case *exprpb.Constant_Uint64Value: - return fmt.Sprintf("%du", c.GetUint64Value()) - case *exprpb.Constant_NullValue: +func formatLiteral(c ref.Val) string { + switch v := c.(type) { + case types.Bool: + return fmt.Sprintf("%t", v) + case types.Bytes: + return fmt.Sprintf("b\"%s\"", string(v)) + case types.Double: + return fmt.Sprintf("%v", float64(v)) + case types.Int: + return fmt.Sprintf("%d", int64(v)) + case types.String: + return strconv.Quote(string(v)) + case types.Uint: + return fmt.Sprintf("%du", uint64(v)) + case types.Null: return "null" default: panic("Unknown constant type") diff --git a/vendor/github.com/google/cel-go/common/decls/decls.go b/vendor/github.com/google/cel-go/common/decls/decls.go index 734ebe57e5..0a42f81d40 100644 --- a/vendor/github.com/google/cel-go/common/decls/decls.go +++ b/vendor/github.com/google/cel-go/common/decls/decls.go @@ -162,7 +162,9 @@ 
func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error { if oID == overload.ID() { if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() { // Allow redefinition of an overload implementation so long as the signatures match. - f.overloads[oID] = overload + if overload.hasBinding() { + f.overloads[oID] = overload + } return nil } return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID) diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go index 774dcb5b48..0cf21345e6 100644 --- a/vendor/github.com/google/cel-go/common/error.go +++ b/vendor/github.com/google/cel-go/common/error.go @@ -18,8 +18,6 @@ import ( "fmt" "strings" "unicode/utf8" - - "golang.org/x/text/width" ) // NewError creates an error associated with an expression id with the given message at the given location. @@ -35,18 +33,15 @@ type Error struct { } const ( - dot = "." - ind = "^" + dot = "." + ind = "^" + wideDot = "\uff0e" + wideInd = "\uff3e" // maxSnippetLength is the largest number of characters which can be rendered in an error message snippet. maxSnippetLength = 16384 ) -var ( - wideDot = width.Widen.String(dot) - wideInd = width.Widen.String(ind) -) - // ToDisplayString decorates the error message with the source location. func (e *Error) ToDisplayString(source Source) string { var result = fmt.Sprintf("ERROR: %s:%d:%d: %s", diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go index 63919714ea..25adc73d8e 100644 --- a/vendor/github.com/google/cel-go/common/errors.go +++ b/vendor/github.com/google/cel-go/common/errors.go @@ -64,7 +64,7 @@ func (e *Errors) GetErrors() []*Error { // Append creates a new Errors object with the current and input errors. func (e *Errors) Append(errs []*Error) *Errors { return &Errors{ - errors: append(e.errors, errs...), + errors: append(e.errors[:], errs...), source: e.source, numErrors: e.numErrors + len(errs), maxErrorsToReport: e.maxErrorsToReport, diff --git a/vendor/github.com/google/cel-go/common/runes/buffer.go b/vendor/github.com/google/cel-go/common/runes/buffer.go index 50aac0b273..021198224d 100644 --- a/vendor/github.com/google/cel-go/common/runes/buffer.go +++ b/vendor/github.com/google/cel-go/common/runes/buffer.go @@ -127,20 +127,48 @@ var nilBuffer = &emptyBuffer{} // elements of the byte or uint16 array, and continue. The underlying storage is an rune array // containing any Unicode character. func NewBuffer(data string) Buffer { + buf, _ := newBuffer(data, false) + return buf +} + +// NewBufferAndLineOffsets returns an efficient implementation of Buffer for the given text based on +// the ranges of the encoded code points contained within, as well as returning the line offsets. +// +// Code points are represented as an array of byte, uint16, or rune. This approach ensures that +// each index represents a code point by itself without needing to use an array of rune. At first +// we assume all code points are less than or equal to '\u007f'. If this holds true, the +// underlying storage is a byte array containing only ASCII characters. If we encountered a code +// point above this range but less than or equal to '\uffff' we allocate a uint16 array, copy the +// elements of previous byte array to the uint16 array, and continue. If this holds true, the +// underlying storage is a uint16 array containing only Unicode characters in the Basic Multilingual +// Plane. 
If we encounter a code point above '\uffff' we allocate an rune array, copy the previous +// elements of the byte or uint16 array, and continue. The underlying storage is an rune array +// containing any Unicode character. +func NewBufferAndLineOffsets(data string) (Buffer, []int32) { + return newBuffer(data, true) +} + +func newBuffer(data string, lines bool) (Buffer, []int32) { if len(data) == 0 { - return nilBuffer + return nilBuffer, []int32{0} } var ( - idx = 0 - buf8 = make([]byte, 0, len(data)) + idx = 0 + off int32 = 0 + buf8 = make([]byte, 0, len(data)) buf16 []uint16 buf32 []rune + offs []int32 ) for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s + if lines && r == '\n' { + offs = append(offs, off+1) + } if r < utf8.RuneSelf { buf8 = append(buf8, byte(r)) + off++ continue } if r <= 0xffff { @@ -150,6 +178,7 @@ func NewBuffer(data string) Buffer { } buf8 = nil buf16 = append(buf16, uint16(r)) + off++ goto copy16 } buf32 = make([]rune, len(buf8), len(data)) @@ -158,17 +187,25 @@ func NewBuffer(data string) Buffer { } buf8 = nil buf32 = append(buf32, r) + off++ goto copy32 } + if lines { + offs = append(offs, off+1) + } return &asciiBuffer{ arr: buf8, - } + }, offs copy16: for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s + if lines && r == '\n' { + offs = append(offs, off+1) + } if r <= 0xffff { buf16 = append(buf16, uint16(r)) + off++ continue } buf32 = make([]rune, len(buf16), len(data)) @@ -177,18 +214,29 @@ copy16: } buf16 = nil buf32 = append(buf32, r) + off++ goto copy32 } + if lines { + offs = append(offs, off+1) + } return &basicBuffer{ arr: buf16, - } + }, offs copy32: for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s + if lines && r == '\n' { + offs = append(offs, off+1) + } buf32 = append(buf32, r) + off++ + } + if lines { + offs = append(offs, off+1) } return &supplementalBuffer{ arr: buf32, - } + }, offs } diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go index acf22bdf15..ec79cb5454 100644 --- a/vendor/github.com/google/cel-go/common/source.go +++ b/vendor/github.com/google/cel-go/common/source.go @@ -15,9 +15,6 @@ package common import ( - "strings" - "unicode/utf8" - "github.com/google/cel-go/common/runes" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -80,17 +77,11 @@ func NewTextSource(text string) Source { // NewStringSource creates a new Source from the given contents and description. func NewStringSource(contents string, description string) Source { // Compute line offsets up front as they are referred to frequently. 
- lines := strings.Split(contents, "\n") - offsets := make([]int32, len(lines)) - var offset int32 - for i, line := range lines { - offset = offset + int32(utf8.RuneCountInString(line)) + 1 - offsets[int32(i)] = offset - } + buf, offs := runes.NewBufferAndLineOffsets(contents) return &sourceImpl{ - Buffer: runes.NewBuffer(contents), + Buffer: buf, description: description, - lineOffsets: offsets, + lineOffsets: offs, } } @@ -172,9 +163,8 @@ func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) { for _, lineOffset := range s.lineOffsets { if lineOffset > characterOffset { break - } else { - line++ } + line++ } if line == 1 { return line, 0 diff --git a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel index c130a93f63..b55f452156 100644 --- a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel @@ -12,7 +12,6 @@ go_library( ], importpath = "github.com/google/cel-go/common/stdlib", deps = [ - "//checker/decls:go_default_library", "//common/decls:go_default_library", "//common/functions:go_default_library", "//common/operators:go_default_library", @@ -20,6 +19,5 @@ go_library( "//common/types:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", ], ) \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/common/stdlib/standard.go b/vendor/github.com/google/cel-go/common/stdlib/standard.go index d02cb64bf1..1550c17863 100644 --- a/vendor/github.com/google/cel-go/common/stdlib/standard.go +++ b/vendor/github.com/google/cel-go/common/stdlib/standard.go @@ -23,15 +23,11 @@ import ( "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) var ( stdFunctions []*decls.FunctionDecl - stdFnDecls []*exprpb.Decl stdTypes []*decls.VariableDecl - stdTypeDecls []*exprpb.Decl ) func init() { @@ -55,15 +51,6 @@ func init() { decls.TypeVariable(types.UintType), } - stdTypeDecls = make([]*exprpb.Decl, 0, len(stdTypes)) - for _, stdType := range stdTypes { - typeVar, err := decls.VariableDeclToExprDecl(stdType) - if err != nil { - panic(err) - } - stdTypeDecls = append(stdTypeDecls, typeVar) - } - stdFunctions = []*decls.FunctionDecl{ // Logical operators. Special-cased within the interpreter. // Note, the singleton binding prevents extensions from overriding the operator behavior. @@ -576,18 +563,6 @@ func init() { decls.MemberOverload(overloads.DurationToMilliseconds, argTypes(types.DurationType), types.IntType)), } - - stdFnDecls = make([]*exprpb.Decl, 0, len(stdFunctions)) - for _, fn := range stdFunctions { - if fn.IsDeclarationDisabled() { - continue - } - ed, err := decls.FunctionDeclToExprDecl(fn) - if err != nil { - panic(err) - } - stdFnDecls = append(stdFnDecls, ed) - } } // Functions returns the set of standard library function declarations and definitions for CEL. @@ -595,27 +570,11 @@ func Functions() []*decls.FunctionDecl { return stdFunctions } -// FunctionExprDecls returns the legacy style protobuf-typed declarations for all functions and overloads -// in the CEL standard environment. -// -// Deprecated: use Functions -func FunctionExprDecls() []*exprpb.Decl { - return stdFnDecls -} - // Types returns the set of standard library types for CEL. 
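// A minimal sketch of consuming the native stdlib declarations now that the
// protobuf-based accessors have been removed: callers range over Functions()
// directly. Looking up a name such as "size" is just an example.
func exampleFindFunction(name string) *decls.FunctionDecl {
	for _, fn := range Functions() {
		if fn.Name() == name {
			return fn
		}
	}
	return nil
}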
func Types() []*decls.VariableDecl { return stdTypes } -// TypeExprDecls returns the legacy style protobuf-typed declarations for all types in the CEL -// standard environment. -// -// Deprecated: use Types -func TypeExprDecls() []*exprpb.Decl { - return stdTypeDecls -} - func notStrictlyFalse(value ref.Val) ref.Val { if types.IsBool(value) { return value diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go index 5838755f8b..7e813e291b 100644 --- a/vendor/github.com/google/cel-go/common/types/bytes.go +++ b/vendor/github.com/google/cel-go/common/types/bytes.go @@ -58,7 +58,17 @@ func (b Bytes) Compare(other ref.Val) ref.Val { // ConvertToNative implements the ref.Val interface method. func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) { switch typeDesc.Kind() { - case reflect.Array, reflect.Slice: + case reflect.Array: + if len(b) != typeDesc.Len() { + return nil, fmt.Errorf("[%d]byte not assignable to [%d]byte array", len(b), typeDesc.Len()) + } + refArrPtr := reflect.New(reflect.ArrayOf(len(b), typeDesc.Elem())) + refArr := refArrPtr.Elem() + for i, byt := range b { + refArr.Index(i).Set(reflect.ValueOf(byt).Convert(typeDesc.Elem())) + } + return refArr.Interface(), nil + case reflect.Slice: return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil case reflect.Ptr: switch typeDesc { diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go index aa8f94b4f8..9c9d9e21ef 100644 --- a/vendor/github.com/google/cel-go/common/types/err.go +++ b/vendor/github.com/google/cel-go/common/types/err.go @@ -31,6 +31,7 @@ type Error interface { // Err type which extends the built-in go error and implements ref.Val. type Err struct { error + id int64 } var ( @@ -58,7 +59,24 @@ var ( // NewErr creates a new Err described by the format string and args. // TODO: Audit the use of this function and standardize the error messages and codes. func NewErr(format string, args ...any) ref.Val { - return &Err{fmt.Errorf(format, args...)} + return &Err{error: fmt.Errorf(format, args...)} +} + +// NewErrWithNodeID creates a new Err described by the format string and args. +// TODO: Audit the use of this function and standardize the error messages and codes. +func NewErrWithNodeID(id int64, format string, args ...any) ref.Val { + return &Err{error: fmt.Errorf(format, args...), id: id} +} + +// LabelErrNode returns val unaltered it is not an Err or if the error has a non-zero +// AST node ID already present. Otherwise the id is added to the error for +// recovery with the Err.NodeID method. +func LabelErrNode(id int64, val ref.Val) ref.Val { + if err, ok := val.(*Err); ok && err.id == 0 { + err.id = id + return err + } + return val } // NoSuchOverloadErr returns a new types.Err instance with a no such overload message. @@ -124,6 +142,11 @@ func (e *Err) Value() any { return e.error } +// NodeID returns the AST node ID of the expression that returned the error. +func (e *Err) NodeID() int64 { + return e.id +} + // Is implements errors.Is. 
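// A minimal sketch of attaching expression IDs to runtime errors with the new
// NewErrWithNodeID/LabelErrNode helpers; the ID 42 is arbitrary. LabelErrNode
// only sets the ID when the value is an *Err that has not been labeled yet, so
// other values pass through unchanged.
func exampleLabelError() int64 {
	err := NewErr("no such overload") // created with a zero node ID
	labeled := LabelErrNode(42, err)  // the error is tagged with ID 42
	return labeled.(*Err).NodeID()    // returns 42
}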
func (e *Err) Is(target error) bool { return e.error.Error() == target.Error() diff --git a/vendor/github.com/google/cel-go/common/types/int.go b/vendor/github.com/google/cel-go/common/types/int.go index 940772aed1..0ae9507c39 100644 --- a/vendor/github.com/google/cel-go/common/types/int.go +++ b/vendor/github.com/google/cel-go/common/types/int.go @@ -90,6 +90,18 @@ func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) { return nil, err } return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Int8: + v, err := int64ToInt8Checked(int64(i)) + if err != nil { + return nil, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Int16: + v, err := int64ToInt16Checked(int64(i)) + if err != nil { + return nil, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil case reflect.Int64: return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil case reflect.Ptr: diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go index d4932b4a90..06f48dde75 100644 --- a/vendor/github.com/google/cel-go/common/types/list.go +++ b/vendor/github.com/google/cel-go/common/types/list.go @@ -190,7 +190,13 @@ func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) { // Allow the element ConvertToNative() function to determine whether conversion is possible. otherElemType := typeDesc.Elem() elemCount := l.size - nativeList := reflect.MakeSlice(typeDesc, elemCount, elemCount) + var nativeList reflect.Value + if typeDesc.Kind() == reflect.Array { + nativeList = reflect.New(reflect.ArrayOf(elemCount, typeDesc)).Elem().Index(0) + } else { + nativeList = reflect.MakeSlice(typeDesc, elemCount, elemCount) + + } for i := 0; i < elemCount; i++ { elem := l.NativeToValue(l.get(i)) nativeElemVal, err := elem.ConvertToNative(otherElemType) diff --git a/vendor/github.com/google/cel-go/common/types/optional.go b/vendor/github.com/google/cel-go/common/types/optional.go index a9f30aed01..97845a740c 100644 --- a/vendor/github.com/google/cel-go/common/types/optional.go +++ b/vendor/github.com/google/cel-go/common/types/optional.go @@ -24,7 +24,7 @@ import ( var ( // OptionalType indicates the runtime type of an optional value. - OptionalType = NewOpaqueType("optional") + OptionalType = NewOpaqueType("optional_type") // OptionalNone is a sentinel value which is used to indicate an empty optional value. OptionalNone = &Optional{} diff --git a/vendor/github.com/google/cel-go/common/types/overflow.go b/vendor/github.com/google/cel-go/common/types/overflow.go index c68a921826..dcb66ef596 100644 --- a/vendor/github.com/google/cel-go/common/types/overflow.go +++ b/vendor/github.com/google/cel-go/common/types/overflow.go @@ -326,6 +326,26 @@ func int64ToUint64Checked(v int64) (uint64, error) { return uint64(v), nil } +// int64ToInt8Checked converts an int64 to an int8 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func int64ToInt8Checked(v int64) (int8, error) { + if v < math.MinInt8 || v > math.MaxInt8 { + return 0, errIntOverflow + } + return int8(v), nil +} + +// int64ToInt16Checked converts an int64 to an int16 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func int64ToInt16Checked(v int64) (int16, error) { + if v < math.MinInt16 || v > math.MaxInt16 { + return 0, errIntOverflow + } + return int16(v), nil +} + // int64ToInt32Checked converts an int64 to an int32 value. 
// // If the conversion fails due to overflow the error return value will be non-nil. @@ -336,6 +356,26 @@ func int64ToInt32Checked(v int64) (int32, error) { return int32(v), nil } +// uint64ToUint8Checked converts a uint64 to a uint8 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func uint64ToUint8Checked(v uint64) (uint8, error) { + if v > math.MaxUint8 { + return 0, errUintOverflow + } + return uint8(v), nil +} + +// uint64ToUint16Checked converts a uint64 to a uint16 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func uint64ToUint16Checked(v uint64) (uint16, error) { + if v > math.MaxUint16 { + return 0, errUintOverflow + } + return uint16(v), nil +} + // uint64ToUint32Checked converts a uint64 to a uint32 value. // // If the conversion fails due to overflow the error return value will be non-nil. diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go index 6cc95c276d..bdd474c95a 100644 --- a/vendor/github.com/google/cel-go/common/types/pb/type.go +++ b/vendor/github.com/google/cel-go/common/types/pb/type.go @@ -427,22 +427,49 @@ func unwrap(desc description, msg proto.Message) (any, bool, error) { return structpb.NullValue_NULL_VALUE, true, nil } case *wrapperspb.BoolValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.BytesValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.DoubleValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.FloatValue: + if v == nil { + return nil, true, nil + } return float64(v.GetValue()), true, nil case *wrapperspb.Int32Value: + if v == nil { + return nil, true, nil + } return int64(v.GetValue()), true, nil case *wrapperspb.Int64Value: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.StringValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.UInt32Value: + if v == nil { + return nil, true, nil + } return uint64(v.GetValue()), true, nil case *wrapperspb.UInt64Value: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil } return msg, false, nil diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go index e80b4622e2..936a4e28b2 100644 --- a/vendor/github.com/google/cel-go/common/types/provider.go +++ b/vendor/github.com/google/cel-go/common/types/provider.go @@ -54,6 +54,10 @@ type Provider interface { // Returns false if not found. FindStructType(structType string) (*Type, bool) + // FindStructFieldNames returns thet field names associated with the type, if the type + // is found. + FindStructFieldNames(structType string) ([]string, bool) + // FieldStructFieldType returns the field type for a checked type value. Returns // false if the field could not be found. FindStructFieldType(structType, fieldName string) (*FieldType, bool) @@ -154,7 +158,7 @@ func (p *Registry) EnumValue(enumName string) ref.Val { return Int(enumVal.Value()) } -// FieldFieldType returns the field type for a checked type value. Returns false if +// FindFieldType returns the field type for a checked type value. Returns false if // the field could not be found. 
// // Deprecated: use FindStructFieldType @@ -173,7 +177,24 @@ func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, GetFrom: field.GetFrom}, true } -// FieldStructFieldType returns the field type for a checked type value. Returns +// FindStructFieldNames returns the set of field names for the given struct type, +// if the type exists in the registry. +func (p *Registry) FindStructFieldNames(structType string) ([]string, bool) { + msgType, found := p.pbdb.DescribeType(structType) + if !found { + return []string{}, false + } + fieldMap := msgType.FieldMap() + fields := make([]string, len(fieldMap)) + idx := 0 + for f := range fieldMap { + fields[idx] = f + idx++ + } + return fields, true +} + +// FindStructFieldType returns the field type for a checked type value. Returns // false if the field could not be found. func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) { msgType, found := p.pbdb.DescribeType(structType) @@ -255,7 +276,7 @@ func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Va } err := msgSetField(msg, field, value) if err != nil { - return &Err{err} + return &Err{error: err} } } return p.NativeToValue(msg.Interface()) @@ -564,17 +585,46 @@ func nativeToValue(a Adapter, value any) (ref.Val, bool) { refKind := refValue.Kind() switch refKind { case reflect.Array, reflect.Slice: + if refValue.Type().Elem() == reflect.TypeOf(byte(0)) { + if refValue.CanAddr() { + return Bytes(refValue.Bytes()), true + } + tmp := reflect.New(refValue.Type()) + tmp.Elem().Set(refValue) + return Bytes(tmp.Elem().Bytes()), true + } return NewDynamicList(a, v), true case reflect.Map: return NewDynamicMap(a, v), true // type aliases of primitive types cannot be asserted as that type, but rather need // to be downcast to int32 before being converted to a CEL representation. 
+ case reflect.Bool: + boolTupe := reflect.TypeOf(false) + return Bool(refValue.Convert(boolTupe).Interface().(bool)), true + case reflect.Int: + intType := reflect.TypeOf(int(0)) + return Int(refValue.Convert(intType).Interface().(int)), true + case reflect.Int8: + intType := reflect.TypeOf(int8(0)) + return Int(refValue.Convert(intType).Interface().(int8)), true + case reflect.Int16: + intType := reflect.TypeOf(int16(0)) + return Int(refValue.Convert(intType).Interface().(int16)), true case reflect.Int32: intType := reflect.TypeOf(int32(0)) return Int(refValue.Convert(intType).Interface().(int32)), true case reflect.Int64: intType := reflect.TypeOf(int64(0)) return Int(refValue.Convert(intType).Interface().(int64)), true + case reflect.Uint: + uintType := reflect.TypeOf(uint(0)) + return Uint(refValue.Convert(uintType).Interface().(uint)), true + case reflect.Uint8: + uintType := reflect.TypeOf(uint8(0)) + return Uint(refValue.Convert(uintType).Interface().(uint8)), true + case reflect.Uint16: + uintType := reflect.TypeOf(uint16(0)) + return Uint(refValue.Convert(uintType).Interface().(uint16)), true case reflect.Uint32: uintType := reflect.TypeOf(uint32(0)) return Uint(refValue.Convert(uintType).Interface().(uint32)), true @@ -587,6 +637,9 @@ func nativeToValue(a Adapter, value any) (ref.Val, bool) { case reflect.Float64: doubleType := reflect.TypeOf(float64(0)) return Double(refValue.Convert(doubleType).Interface().(float64)), true + case reflect.String: + stringType := reflect.TypeOf("") + return String(refValue.Convert(stringType).Interface().(string)), true } } return nil, false diff --git a/vendor/github.com/google/cel-go/common/types/string.go b/vendor/github.com/google/cel-go/common/types/string.go index 028e6824d2..3a93743f24 100644 --- a/vendor/github.com/google/cel-go/common/types/string.go +++ b/vendor/github.com/google/cel-go/common/types/string.go @@ -66,10 +66,7 @@ func (s String) Compare(other ref.Val) ref.Val { func (s String) ConvertToNative(typeDesc reflect.Type) (any, error) { switch typeDesc.Kind() { case reflect.String: - if reflect.TypeOf(s).AssignableTo(typeDesc) { - return s, nil - } - return s.Value(), nil + return reflect.ValueOf(s).Convert(typeDesc).Interface(), nil case reflect.Ptr: switch typeDesc { case anyValueType: @@ -158,7 +155,7 @@ func (s String) Match(pattern ref.Val) ref.Val { } matched, err := regexp.MatchString(pat.Value().(string), s.Value().(string)) if err != nil { - return &Err{err} + return &Err{error: err} } return Bool(matched) } diff --git a/vendor/github.com/google/cel-go/common/types/types.go b/vendor/github.com/google/cel-go/common/types/types.go index 76624eefde..6c3d5f719b 100644 --- a/vendor/github.com/google/cel-go/common/types/types.go +++ b/vendor/github.com/google/cel-go/common/types/types.go @@ -373,6 +373,23 @@ func (t *Type) TypeName() string { return t.runtimeTypeName } +// WithTraits creates a copy of the current Type and sets the trait mask to the traits parameter. +// +// This method should be used with Opaque types where the type acts like a container, e.g. vector. +func (t *Type) WithTraits(traits int) *Type { + if t == nil { + return nil + } + return &Type{ + kind: t.kind, + parameters: t.parameters, + runtimeTypeName: t.runtimeTypeName, + isAssignableType: t.isAssignableType, + isAssignableRuntimeType: t.isAssignableRuntimeType, + traitMask: traits, + } +} + // String returns a human-readable definition of the type name. 
func (t *Type) String() string { if len(t.Parameters()) == 0 { @@ -496,7 +513,7 @@ func NewNullableType(wrapped *Type) *Type { // NewOptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional. func NewOptionalType(param *Type) *Type { - return NewOpaqueType("optional", param) + return NewOpaqueType("optional_type", param) } // NewOpaqueType creates an abstract parameterized type with a given name. diff --git a/vendor/github.com/google/cel-go/common/types/uint.go b/vendor/github.com/google/cel-go/common/types/uint.go index 3257f9ade9..6d74f30d88 100644 --- a/vendor/github.com/google/cel-go/common/types/uint.go +++ b/vendor/github.com/google/cel-go/common/types/uint.go @@ -80,6 +80,18 @@ func (i Uint) ConvertToNative(typeDesc reflect.Type) (any, error) { return 0, err } return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Uint8: + v, err := uint64ToUint8Checked(uint64(i)) + if err != nil { + return 0, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Uint16: + v, err := uint64ToUint16Checked(uint64(i)) + if err != nil { + return 0, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil case reflect.Uint64: return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil case reflect.Ptr: diff --git a/vendor/github.com/google/cel-go/ext/BUILD.bazel b/vendor/github.com/google/cel-go/ext/BUILD.bazel index 6fdcc60c65..db223da2f7 100644 --- a/vendor/github.com/google/cel-go/ext/BUILD.bazel +++ b/vendor/github.com/google/cel-go/ext/BUILD.bazel @@ -7,7 +7,9 @@ package( go_library( name = "go_default_library", srcs = [ + "bindings.go", "encoders.go", + "formatting.go", "guards.go", "lists.go", "math.go", @@ -21,14 +23,14 @@ go_library( deps = [ "//cel:go_default_library", "//checker:go_default_library", - "//checker/decls:go_default_library", + "//common/ast:go_default_library", "//common/overloads:go_default_library", + "//common/operators:go_default_library", "//common/types:go_default_library", "//common/types/pb:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "//interpreter:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", "@org_golang_google_protobuf//types/known/structpb", @@ -61,7 +63,6 @@ go_test( "//test:go_default_library", "//test/proto2pb:go_default_library", "//test/proto3pb:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", diff --git a/vendor/github.com/google/cel-go/ext/README.md b/vendor/github.com/google/cel-go/ext/README.md index 6f621ac4af..a55f56de12 100644 --- a/vendor/github.com/google/cel-go/ext/README.md +++ b/vendor/github.com/google/cel-go/ext/README.md @@ -100,7 +100,8 @@ argument. Simple numeric and list literals are supported as valid argument types; however, other literals will be flagged as errors during macro expansion. If the argument expression does not resolve to a numeric or list(numeric) type during type-checking, or during runtime then an error -will be produced. If a list argument is empty, this too will produce an error. +will be produced. 
If a list argument is empty, this too will produce an +error. math.least(<arg>, ...) -> <arg> @@ -117,6 +118,244 @@ Examples: math.least(a, b) // check-time error if a or b is non-numeric math.least(dyn('string')) // runtime error +### Math.BitOr + +Introduced at version: 1 + +Performs a bitwise-OR operation over two int or uint values. + + math.bitOr(<int>, <int>) -> <int> + math.bitOr(<uint>, <uint>) -> <uint> + +Examples: + + math.bitOr(1u, 2u) // returns 3u + math.bitOr(-2, -4) // returns -2 + +### Math.BitAnd + +Introduced at version: 1 + +Performs a bitwise-AND operation over two int or uint values. + + math.bitAnd(<int>, <int>) -> <int> + math.bitAnd(<uint>, <uint>) -> <uint> + +Examples: + + math.bitAnd(3u, 2u) // returns 2u + math.bitAnd(3, 5) // returns 3 + math.bitAnd(-3, -5) // returns -7 + +### Math.BitXor + +Introduced at version: 1 + + math.bitXor(<int>, <int>) -> <int> + math.bitXor(<uint>, <uint>) -> <uint> + +Performs a bitwise-XOR operation over two int or uint values. + +Examples: + + math.bitXor(3u, 5u) // returns 6u + math.bitXor(1, 3) // returns 2 + +### Math.BitNot + +Introduced at version: 1 + +Function which accepts a single int or uint and performs a bitwise-NOT +ones-complement of the given binary value. + + math.bitNot(<int>) -> <int> + math.bitNot(<uint>) -> <uint> + +Examples + + math.bitNot(1) // returns -1 + math.bitNot(-1) // returns 0 + math.bitNot(0u) // returns 18446744073709551615u + +### Math.BitShiftLeft + +Introduced at version: 1 + +Perform a left shift of bits on the first parameter, by the amount of bits +specified in the second parameter. The first parameter is either a uint or +an int. The second parameter must be an int. + +When the second parameter is 64 or greater, 0 will always be returned +since the number of bits shifted is greater than or equal to the total bit +length of the number being shifted. Negative valued bit shifts will result +in a runtime error. + + math.bitShiftLeft(<int>, <int>) -> <int> + math.bitShiftLeft(<uint>, <int>) -> <uint> + +Examples + + math.bitShiftLeft(1, 2) // returns 4 + math.bitShiftLeft(-1, 2) // returns -4 + math.bitShiftLeft(1u, 2) // returns 4u + math.bitShiftLeft(1u, 200) // returns 0u + +### Math.BitShiftRight + +Introduced at version: 1 + +Perform a right shift of bits on the first parameter, by the amount of bits +specified in the second parameter. The first parameter is either a uint or +an int. The second parameter must be an int. + +When the second parameter is 64 or greater, 0 will always be returned since +the number of bits shifted is greater than or equal to the total bit length +of the number being shifted. Negative valued bit shifts will result in a +runtime error. + +The sign bit extension will not be preserved for this operation: vacant bits +on the left are filled with 0. + + math.bitShiftRight(<int>, <int>) -> <int> + math.bitShiftRight(<uint>, <int>) -> <uint> + +Examples + + math.bitShiftRight(1024, 2) // returns 256 + math.bitShiftRight(1024u, 2) // returns 256u + math.bitShiftRight(1024u, 64) // returns 0u + +### Math.Ceil + +Introduced at version: 1 + +Compute the ceiling of a double value. + + math.ceil(<double>) -> <double> + +Examples: + + math.ceil(1.2) // returns 2.0 + math.ceil(-1.2) // returns -1.0 + +### Math.Floor + +Introduced at version: 1 + +Compute the floor of a double value. + + math.floor(<double>) -> <double> + +Examples: + + math.floor(1.2) // returns 1.0 + math.floor(-1.2) // returns -2.0 + +### Math.Round + +Introduced at version: 1 + +Rounds the double value to the nearest whole number with ties rounding away +from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0.
+ + math.round(<double>) -> <double> + +Examples: + + math.round(1.2) // returns 1.0 + math.round(1.5) // returns 2.0 + math.round(-1.5) // returns -2.0 + +### Math.Trunc + +Introduced at version: 1 + +Truncates the fractional portion of the double value. + + math.trunc(<double>) -> <double> + +Examples: + + math.trunc(-1.3) // returns -1.0 + math.trunc(1.3) // returns 1.0 + +### Math.Abs + +Introduced at version: 1 + +Returns the absolute value of the numeric type provided as input. If the +value is NaN, the output is NaN. If the input is int64 min, the function +will result in an overflow error. + + math.abs(<double>) -> <double> + math.abs(<int>) -> <int> + math.abs(<uint>) -> <uint> + +Examples: + + math.abs(-1) // returns 1 + math.abs(1) // returns 1 + math.abs(-9223372036854775808) // overflow error + +### Math.Sign + +Introduced at version: 1 + +Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or +uint depending on the overload. For floating point values, if NaN is +provided as input, the output is also NaN. The implementation does not +differentiate between positive and negative zero. + + math.sign(<double>) -> <double> + math.sign(<int>) -> <int> + math.sign(<uint>) -> <uint> + +Examples: + + math.sign(-42) // returns -1 + math.sign(0) // returns 0 + math.sign(42) // returns 1 + +### Math.IsInf + +Introduced at version: 1 + +Returns true if the input double value is -Inf or +Inf. + + math.isInf(<double>) -> <bool> + +Examples: + + math.isInf(1.0/0.0) // returns true + math.isInf(1.2) // returns false + +### Math.IsNaN + +Introduced at version: 1 + +Returns true if the input double value is NaN, false otherwise. + + math.isNaN(<double>) -> <bool> + +Examples: + + math.isNaN(0.0/0.0) // returns true + math.isNaN(1.2) // returns false + +### Math.IsFinite + +Introduced at version: 1 + +Returns true if the value is a finite number. Equivalent in behavior to: +!math.isNaN(double) && !math.isInf(double) + + math.isFinite(<double>) -> <bool> + +Examples: + + math.isFinite(0.0/0.0) // returns false + math.isFinite(1.2) // returns true + ## Protos Protos configure extended macros and functions for proto manipulation. @@ -273,10 +512,10 @@ elements in the resulting string. Examples: - ['hello', 'mellow'].join() // returns 'hellomellow' - ['hello', 'mellow'].join(' ') // returns 'hello mellow' - [].join() // returns '' - [].join('/') // returns '' + ['hello', 'mellow'].join() // returns 'hellomellow' + ['hello', 'mellow'].join(' ') // returns 'hello mellow' + [].join() // returns '' + [].join('/') // returns '' ### LastIndexOf @@ -414,3 +653,17 @@ Examples: 'TacoCat'.upperAscii() // returns 'TACOCAT' 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII' + +### Reverse + +Returns a new string whose characters are the same as the target string, only formatted in +reverse order. +This function relies on converting strings to rune arrays in order to reverse. +It can be located in Version 3 of strings.
+ + <string>.reverse() -> <string> + +Examples: + + 'gums'.reverse() // returns 'smug' + 'John Smith'.reverse() // returns 'htimS nhoJ' \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/ext/bindings.go b/vendor/github.com/google/cel-go/ext/bindings.go index 4ac9a7f07f..2c6cc627fa 100644 --- a/vendor/github.com/google/cel-go/ext/bindings.go +++ b/vendor/github.com/google/cel-go/ext/bindings.go @@ -16,8 +16,8 @@ package ext import ( "github.com/google/cel-go/cel" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" ) // Bindings returns a cel.EnvOption to configure support for local variable @@ -61,7 +61,7 @@ func (celBindings) CompileOptions() []cel.EnvOption { return []cel.EnvOption{ cel.Macros( // cel.bind(var, <init>, <expr>) - cel.NewReceiverMacro(bindMacro, 3, celBind), + cel.ReceiverMacro(bindMacro, 3, celBind), ), } } @@ -70,27 +70,27 @@ func (celBindings) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } -func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func celBind(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { if !macroTargetMatchesNamespace(celNamespace, target) { return nil, nil } varIdent := args[0] varName := "" - switch varIdent.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - varName = varIdent.GetIdentExpr().GetName() + switch varIdent.Kind() { + case ast.IdentKind: + varName = varIdent.AsIdent() default: - return nil, meh.NewError(varIdent.GetId(), "cel.bind() variable names must be simple identifiers") + return nil, mef.NewError(varIdent.ID(), "cel.bind() variable names must be simple identifiers") } varInit := args[1] resultExpr := args[2] - return meh.Fold( + return mef.NewComprehension( + mef.NewList(), unusedIterVar, - meh.NewList(), varName, varInit, - meh.LiteralBool(false), - meh.Ident(varName), + mef.NewLiteral(types.False), + mef.NewIdent(varName), resultExpr, ), nil } diff --git a/vendor/github.com/google/cel-go/ext/formatting.go b/vendor/github.com/google/cel-go/ext/formatting.go new file mode 100644 index 0000000000..dbff613b2a --- /dev/null +++ b/vendor/github.com/google/cel-go/ext/formatting.go @@ -0,0 +1,904 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package ext + +import ( + "errors" + "fmt" + "math" + "sort" + "strconv" + "strings" + "unicode" + + "golang.org/x/text/language" + "golang.org/x/text/message" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +type clauseImpl func(ref.Val, string) (string, error) + +func clauseForType(argType ref.Type) (clauseImpl, error) { + switch argType { + case types.IntType, types.UintType: + return formatDecimal, nil + case types.StringType, types.BytesType, types.BoolType, types.NullType, types.TypeType: + return FormatString, nil + case types.TimestampType, types.DurationType: + // special case to ensure timestamps/durations get printed as CEL literals + return func(arg ref.Val, locale string) (string, error) { + argStrVal := arg.ConvertToType(types.StringType) + argStr := argStrVal.Value().(string) + if arg.Type() == types.TimestampType { + return fmt.Sprintf("timestamp(%q)", argStr), nil + } + if arg.Type() == types.DurationType { + return fmt.Sprintf("duration(%q)", argStr), nil + } + return "", fmt.Errorf("cannot convert argument of type %s to timestamp/duration", arg.Type().TypeName()) + }, nil + case types.ListType: + return formatList, nil + case types.MapType: + return formatMap, nil + case types.DoubleType: + // avoid formatFixed so we can output a period as the decimal separator in order + // to always be a valid CEL literal + return func(arg ref.Val, locale string) (string, error) { + argDouble, ok := arg.Value().(float64) + if !ok { + return "", fmt.Errorf("couldn't convert %s to float64", arg.Type().TypeName()) + } + fmtStr := fmt.Sprintf("%%.%df", defaultPrecision) + return fmt.Sprintf(fmtStr, argDouble), nil + }, nil + case types.TypeType: + return func(arg ref.Val, locale string) (string, error) { + return fmt.Sprintf("type(%s)", arg.Value().(string)), nil + }, nil + default: + return nil, fmt.Errorf("no formatting function for %s", argType.TypeName()) + } +} + +func formatList(arg ref.Val, locale string) (string, error) { + argList := arg.(traits.Lister) + argIterator := argList.Iterator() + var listStrBuilder strings.Builder + _, err := listStrBuilder.WriteRune('[') + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + for argIterator.HasNext() == types.True { + member := argIterator.Next() + memberFormat, err := clauseForType(member.Type()) + if err != nil { + return "", err + } + unquotedStr, err := memberFormat(member, locale) + if err != nil { + return "", err + } + str := quoteForCEL(member, unquotedStr) + _, err = listStrBuilder.WriteString(str) + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + if argIterator.HasNext() == types.True { + _, err = listStrBuilder.WriteString(", ") + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + } + } + _, err = listStrBuilder.WriteRune(']') + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + return listStrBuilder.String(), nil +} + +func formatMap(arg ref.Val, locale string) (string, error) { + argMap := arg.(traits.Mapper) + argIterator := argMap.Iterator() + type mapPair struct { + key string + value string + } + argPairs := make([]mapPair, argMap.Size().Value().(int64)) + i := 0 + for argIterator.HasNext() == types.True { + key := argIterator.Next() + var keyFormat clauseImpl + 
switch key.Type() { + case types.StringType, types.BoolType: + keyFormat = FormatString + case types.IntType, types.UintType: + keyFormat = formatDecimal + default: + return "", fmt.Errorf("no formatting function for map key of type %s", key.Type().TypeName()) + } + unquotedKeyStr, err := keyFormat(key, locale) + if err != nil { + return "", err + } + keyStr := quoteForCEL(key, unquotedKeyStr) + value, found := argMap.Find(key) + if !found { + return "", fmt.Errorf("could not find key: %q", key) + } + valueFormat, err := clauseForType(value.Type()) + if err != nil { + return "", err + } + unquotedValueStr, err := valueFormat(value, locale) + if err != nil { + return "", err + } + valueStr := quoteForCEL(value, unquotedValueStr) + argPairs[i] = mapPair{keyStr, valueStr} + i++ + } + sort.SliceStable(argPairs, func(x, y int) bool { + return argPairs[x].key < argPairs[y].key + }) + var mapStrBuilder strings.Builder + _, err := mapStrBuilder.WriteRune('{') + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + for i, entry := range argPairs { + _, err = mapStrBuilder.WriteString(fmt.Sprintf("%s:%s", entry.key, entry.value)) + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + if i < len(argPairs)-1 { + _, err = mapStrBuilder.WriteString(", ") + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + } + } + _, err = mapStrBuilder.WriteRune('}') + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + return mapStrBuilder.String(), nil +} + +// quoteForCEL takes a formatted, unquoted value and quotes it in a manner suitable +// for embedding directly in CEL. +func quoteForCEL(refVal ref.Val, unquotedValue string) string { + switch refVal.Type() { + case types.StringType: + return fmt.Sprintf("%q", unquotedValue) + case types.BytesType: + return fmt.Sprintf("b%q", unquotedValue) + case types.DoubleType: + // special case to handle infinity/NaN + num := refVal.Value().(float64) + if math.IsInf(num, 1) || math.IsInf(num, -1) || math.IsNaN(num) { + return fmt.Sprintf("%q", unquotedValue) + } + return unquotedValue + default: + return unquotedValue + } +} + +// FormatString returns the string representation of a CEL value. +// +// It is used to implement the %s specifier in the (string).format() extension function. 
+func FormatString(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.ListType: + return formatList(arg, locale) + case types.MapType: + return formatMap(arg, locale) + case types.IntType, types.UintType, types.DoubleType, + types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType: + argStrVal := arg.ConvertToType(types.StringType) + argStr, ok := argStrVal.Value().(string) + if !ok { + return "", fmt.Errorf("could not convert argument %q to string", argStrVal) + } + return argStr, nil + case types.NullType: + return "null", nil + default: + return "", stringFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func formatDecimal(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt, ok := arg.ConvertToType(types.IntType).Value().(int64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) + } + return fmt.Sprintf("%d", argInt), nil + case types.UintType: + argInt, ok := arg.ConvertToType(types.UintType).Value().(uint64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) + } + return fmt.Sprintf("%d", argInt), nil + default: + return "", decimalFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func matchLanguage(locale string) (language.Tag, error) { + matcher, err := makeMatcher(locale) + if err != nil { + return language.Und, err + } + tag, _ := language.MatchStrings(matcher, locale) + return tag, nil +} + +func makeMatcher(locale string) (language.Matcher, error) { + tags := make([]language.Tag, 0) + tag, err := language.Parse(locale) + if err != nil { + return nil, err + } + tags = append(tags, tag) + return language.NewMatcher(tags), nil +} + +type stringFormatter struct{} + +func (c *stringFormatter) String(arg ref.Val, locale string) (string, error) { + return FormatString(arg, locale) +} + +func (c *stringFormatter) Decimal(arg ref.Val, locale string) (string, error) { + return formatDecimal(arg, locale) +} + +func (c *stringFormatter) Fixed(precision *int) func(ref.Val, string) (string, error) { + if precision == nil { + precision = new(int) + *precision = defaultPrecision + } + return func(arg ref.Val, locale string) (string, error) { + strException := false + if arg.Type() == types.StringType { + argStr := arg.Value().(string) + if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { + strException = true + } + } + if arg.Type() != types.DoubleType && !strException { + return "", fixedPointFormatError(runtimeID, arg.Type().TypeName()) + } + argFloatVal := arg.ConvertToType(types.DoubleType) + argFloat, ok := argFloatVal.Value().(float64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value()) + } + fmtStr := fmt.Sprintf("%%.%df", *precision) + + matchedLocale, err := matchLanguage(locale) + if err != nil { + return "", fmt.Errorf("error matching locale: %w", err) + } + return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil + } +} + +func (c *stringFormatter) Scientific(precision *int) func(ref.Val, string) (string, error) { + if precision == nil { + precision = new(int) + *precision = defaultPrecision + } + return func(arg ref.Val, locale string) (string, error) { + strException := false + if arg.Type() == types.StringType { + argStr := arg.Value().(string) + if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { + strException = true + } + } + if arg.Type() != types.DoubleType && !strException { + 
return "", scientificFormatError(runtimeID, arg.Type().TypeName()) + } + argFloatVal := arg.ConvertToType(types.DoubleType) + argFloat, ok := argFloatVal.Value().(float64) + if !ok { + return "", fmt.Errorf("could not convert \"%v\" to float64", argFloatVal.Value()) + } + matchedLocale, err := matchLanguage(locale) + if err != nil { + return "", fmt.Errorf("error matching locale: %w", err) + } + fmtStr := fmt.Sprintf("%%%de", *precision) + return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil + } +} + +func (c *stringFormatter) Binary(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt := arg.Value().(int64) + // locale is intentionally unused as integers formatted as binary + // strings are locale-independent + return fmt.Sprintf("%b", argInt), nil + case types.UintType: + argInt := arg.Value().(uint64) + return fmt.Sprintf("%b", argInt), nil + case types.BoolType: + argBool := arg.Value().(bool) + if argBool { + return "1", nil + } + return "0", nil + default: + return "", binaryFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func (c *stringFormatter) Hex(useUpper bool) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) (string, error) { + fmtStr := "%x" + if useUpper { + fmtStr = "%X" + } + switch arg.Type() { + case types.StringType, types.BytesType: + if arg.Type() == types.BytesType { + return fmt.Sprintf(fmtStr, arg.Value().([]byte)), nil + } + return fmt.Sprintf(fmtStr, arg.Value().(string)), nil + case types.IntType: + argInt, ok := arg.Value().(int64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) + } + return fmt.Sprintf(fmtStr, argInt), nil + case types.UintType: + argInt, ok := arg.Value().(uint64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) + } + return fmt.Sprintf(fmtStr, argInt), nil + default: + return "", hexFormatError(runtimeID, arg.Type().TypeName()) + } + } +} + +func (c *stringFormatter) Octal(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt := arg.Value().(int64) + return fmt.Sprintf("%o", argInt), nil + case types.UintType: + argInt := arg.Value().(uint64) + return fmt.Sprintf("%o", argInt), nil + default: + return "", octalFormatError(runtimeID, arg.Type().TypeName()) + } +} + +// stringFormatValidator implements the cel.ASTValidator interface allowing for static validation +// of string.format calls. +type stringFormatValidator struct{} + +// Name returns the name of the validator. +func (stringFormatValidator) Name() string { + return "cel.lib.ext.validate.functions.string.format" +} + +// Configure implements the ASTValidatorConfigurer interface and augments the list of functions to skip +// during homogeneous aggregate literal type-checks. +func (stringFormatValidator) Configure(config cel.MutableValidatorConfig) error { + functions := config.GetOrDefault(cel.HomogeneousAggregateLiteralExemptFunctions, []string{}).([]string) + functions = append(functions, "format") + return config.Set(cel.HomogeneousAggregateLiteralExemptFunctions, functions) +} + +// Validate parses all literal format strings and type checks the format clause against the argument +// at the corresponding ordinal within the list literal argument to the function, if one is specified. 
+func (stringFormatValidator) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) { + root := ast.NavigateAST(a) + formatCallExprs := ast.MatchDescendants(root, matchConstantFormatStringWithListLiteralArgs(a)) + for _, e := range formatCallExprs { + call := e.AsCall() + formatStr := call.Target().AsLiteral().Value().(string) + args := call.Args()[0].AsList().Elements() + formatCheck := &stringFormatChecker{ + args: args, + ast: a, + } + // use a placeholder locale, since locale doesn't affect syntax + _, err := parseFormatString(formatStr, formatCheck, formatCheck, "en_US") + if err != nil { + iss.ReportErrorAtID(getErrorExprID(e.ID(), err), err.Error()) + continue + } + seenArgs := formatCheck.argsRequested + if len(args) > seenArgs { + iss.ReportErrorAtID(e.ID(), + "too many arguments supplied to string.format (expected %d, got %d)", seenArgs, len(args)) + } + } +} + +// getErrorExprID determines which list literal argument triggered a type-disagreement for the +// purposes of more accurate error message reports. +func getErrorExprID(id int64, err error) int64 { + fmtErr, ok := err.(formatError) + if ok { + return fmtErr.id + } + wrapped := errors.Unwrap(err) + if wrapped != nil { + return getErrorExprID(id, wrapped) + } + return id +} + +// matchConstantFormatStringWithListLiteralArgs matches all valid expression nodes for string +// format checking. +func matchConstantFormatStringWithListLiteralArgs(a *ast.AST) ast.ExprMatcher { + return func(e ast.NavigableExpr) bool { + if e.Kind() != ast.CallKind { + return false + } + call := e.AsCall() + if !call.IsMemberFunction() || call.FunctionName() != "format" { + return false + } + overloadIDs := a.GetOverloadIDs(e.ID()) + if len(overloadIDs) != 0 { + found := false + for _, overload := range overloadIDs { + if overload == overloads.ExtFormatString { + found = true + break + } + } + if !found { + return false + } + } + formatString := call.Target() + if formatString.Kind() != ast.LiteralKind || formatString.AsLiteral().Type() != cel.StringType { + return false + } + args := call.Args() + if len(args) != 1 { + return false + } + formatArgs := args[0] + return formatArgs.Kind() == ast.ListKind + } +} + +// stringFormatChecker implements the formatStringInterpolater interface +type stringFormatChecker struct { + args []ast.Expr + argsRequested int + currArgIndex int64 + ast *ast.AST +} + +func (c *stringFormatChecker) String(arg ref.Val, locale string) (string, error) { + formatArg := c.args[c.currArgIndex] + valid, badID := c.verifyString(formatArg) + if !valid { + return "", stringFormatError(badID, c.typeOf(badID).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Decimal(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType) + if !valid { + return "", decimalFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Fixed(precision *int) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + // we allow StringType since "NaN", "Infinity", and "-Infinity" are also valid values + valid := c.verifyTypeOneOf(id, types.DoubleType, types.StringType) + if !valid { + return "", fixedPointFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil + } +} + +func (c *stringFormatChecker) Scientific(precision *int) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) 
(string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.DoubleType, types.StringType) + if !valid { + return "", scientificFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil + } +} + +func (c *stringFormatChecker) Binary(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType, types.BoolType) + if !valid { + return "", binaryFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Hex(useUpper bool) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType, types.StringType, types.BytesType) + if !valid { + return "", hexFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil + } +} + +func (c *stringFormatChecker) Octal(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType) + if !valid { + return "", octalFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Arg(index int64) (ref.Val, error) { + c.argsRequested++ + c.currArgIndex = index + // return a dummy value - this is immediately passed to back to us + // through one of the FormatCallback functions, so anything will do + return types.Int(0), nil +} + +func (c *stringFormatChecker) Size() int64 { + return int64(len(c.args)) +} + +func (c *stringFormatChecker) typeOf(id int64) *cel.Type { + return c.ast.GetType(id) +} + +func (c *stringFormatChecker) verifyTypeOneOf(id int64, validTypes ...*cel.Type) bool { + t := c.typeOf(id) + if t == cel.DynType { + return true + } + for _, vt := range validTypes { + // Only check runtime type compatibility without delving deeper into parameterized types + if t.Kind() == vt.Kind() { + return true + } + } + return false +} + +func (c *stringFormatChecker) verifyString(sub ast.Expr) (bool, int64) { + paramA := cel.TypeParamType("A") + paramB := cel.TypeParamType("B") + subVerified := c.verifyTypeOneOf(sub.ID(), + cel.ListType(paramA), cel.MapType(paramA, paramB), + cel.IntType, cel.UintType, cel.DoubleType, cel.BoolType, cel.StringType, + cel.TimestampType, cel.BytesType, cel.DurationType, cel.TypeType, cel.NullType) + if !subVerified { + return false, sub.ID() + } + switch sub.Kind() { + case ast.ListKind: + for _, e := range sub.AsList().Elements() { + // recursively verify if we're dealing with a list/map + verified, id := c.verifyString(e) + if !verified { + return false, id + } + } + return true, sub.ID() + case ast.MapKind: + for _, e := range sub.AsMap().Entries() { + // recursively verify if we're dealing with a list/map + entry := e.AsMapEntry() + verified, id := c.verifyString(entry.Key()) + if !verified { + return false, id + } + verified, id = c.verifyString(entry.Value()) + if !verified { + return false, id + } + } + return true, sub.ID() + default: + return true, sub.ID() + } +} + +// helper routines for reporting common errors during string formatting static validation and +// runtime execution. 
+ +func binaryFormatError(id int64, badType string) error { + return newFormatError(id, "only integers and bools can be formatted as binary, was given %s", badType) +} + +func decimalFormatError(id int64, badType string) error { + return newFormatError(id, "decimal clause can only be used on integers, was given %s", badType) +} + +func fixedPointFormatError(id int64, badType string) error { + return newFormatError(id, "fixed-point clause can only be used on doubles, was given %s", badType) +} + +func hexFormatError(id int64, badType string) error { + return newFormatError(id, "only integers, byte buffers, and strings can be formatted as hex, was given %s", badType) +} + +func octalFormatError(id int64, badType string) error { + return newFormatError(id, "octal clause can only be used on integers, was given %s", badType) +} + +func scientificFormatError(id int64, badType string) error { + return newFormatError(id, "scientific clause can only be used on doubles, was given %s", badType) +} + +func stringFormatError(id int64, badType string) error { + return newFormatError(id, "string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps, was given %s", badType) +} + +type formatError struct { + id int64 + msg string +} + +func newFormatError(id int64, msg string, args ...any) error { + return formatError{ + id: id, + msg: fmt.Sprintf(msg, args...), + } +} + +func (e formatError) Error() string { + return e.msg +} + +func (e formatError) Is(target error) bool { + return e.msg == target.Error() +} + +// stringArgList implements the formatListArgs interface. +type stringArgList struct { + args traits.Lister +} + +func (c *stringArgList) Arg(index int64) (ref.Val, error) { + if index >= c.args.Size().Value().(int64) { + return nil, fmt.Errorf("index %d out of range", index) + } + return c.args.Get(types.Int(index)), nil +} + +func (c *stringArgList) Size() int64 { + return c.args.Size().Value().(int64) +} + +// formatStringInterpolator is an interface that allows user-defined behavior +// for formatting clause implementations, as well as argument retrieval. +// Each function is expected to support the appropriate types as laid out in +// the string.format documentation, and to return an error if given an inappropriate type. +type formatStringInterpolator interface { + // String takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a string, or an error if one occurred. + String(ref.Val, string) (string, error) + + // Decimal takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a decimal integer, or an error if one occurred. + Decimal(ref.Val, string) (string, error) + + // Fixed takes an int pointer representing precision (or nil if none was given) and + // returns a function operating in a similar manner to String and Decimal, taking a + // ref.Val and locale and returning the appropriate string. A closure is returned + // so precision can be set without needing an additional function call/configuration. + Fixed(*int) func(ref.Val, string) (string, error) + + // Scientific functions identically to Fixed, except the string returned from the closure + // is expected to be in scientific notation. + Scientific(*int) func(ref.Val, string) (string, error) + + // Binary takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a binary integer, or an error if one occurred. 
+ Binary(ref.Val, string) (string, error) + + // Hex takes a boolean that, if true, indicates the hex string output by the returned + // closure should use uppercase letters for A-F. + Hex(bool) func(ref.Val, string) (string, error) + + // Octal takes a ref.Val and a string representing the current locale identifier and + // returns the Val formatted in octal, or an error if one occurred. + Octal(ref.Val, string) (string, error) +} + +// formatListArgs is an interface that allows user-defined list-like datatypes to be used +// for formatting clause implementations. +type formatListArgs interface { + // Arg returns the ref.Val at the given index, or an error if one occurred. + Arg(int64) (ref.Val, error) + + // Size returns the length of the argument list. + Size() int64 +} + +// parseFormatString formats a string according to the string.format syntax, taking the clause implementations +// from the provided FormatCallback and the args from the given FormatList. +func parseFormatString(formatStr string, callback formatStringInterpolator, list formatListArgs, locale string) (string, error) { + i := 0 + argIndex := 0 + var builtStr strings.Builder + for i < len(formatStr) { + if formatStr[i] == '%' { + if i+1 < len(formatStr) && formatStr[i+1] == '%' { + err := builtStr.WriteByte('%') + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i += 2 + continue + } else { + argAny, err := list.Arg(int64(argIndex)) + if err != nil { + return "", err + } + if i+1 >= len(formatStr) { + return "", errors.New("unexpected end of string") + } + if int64(argIndex) >= list.Size() { + return "", fmt.Errorf("index %d out of range", argIndex) + } + numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale) + if refErr != nil { + return "", refErr + } + _, err = builtStr.WriteString(val) + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i += numRead + argIndex++ + } + } else { + err := builtStr.WriteByte(formatStr[i]) + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i++ + } + } + return builtStr.String(), nil +} + +// parseAndFormatClause parses the format clause at the start of the given string with val, and returns +// how many characters were consumed and the substituted string form of val, or an error if one occurred. 
+func parseAndFormatClause(formatStr string, val ref.Val, callback formatStringInterpolator, list formatListArgs, locale string) (int, string, error) { + i := 1 + read, formatter, err := parseFormattingClause(formatStr[i:], callback) + i += read + if err != nil { + return -1, "", newParseFormatError("could not parse formatting clause", err) + } + + valStr, err := formatter(val, locale) + if err != nil { + return -1, "", newParseFormatError("error during formatting", err) + } + return i, valStr, nil +} + +func parseFormattingClause(formatStr string, callback formatStringInterpolator) (int, clauseImpl, error) { + i := 0 + read, precision, err := parsePrecision(formatStr[i:]) + i += read + if err != nil { + return -1, nil, fmt.Errorf("error while parsing precision: %w", err) + } + r := rune(formatStr[i]) + i++ + switch r { + case 's': + return i, callback.String, nil + case 'd': + return i, callback.Decimal, nil + case 'f': + return i, callback.Fixed(precision), nil + case 'e': + return i, callback.Scientific(precision), nil + case 'b': + return i, callback.Binary, nil + case 'x', 'X': + return i, callback.Hex(unicode.IsUpper(r)), nil + case 'o': + return i, callback.Octal, nil + default: + return -1, nil, fmt.Errorf("unrecognized formatting clause \"%c\"", r) + } +} + +func parsePrecision(formatStr string) (int, *int, error) { + i := 0 + if formatStr[i] != '.' { + return i, nil, nil + } + i++ + var buffer strings.Builder + for { + if i >= len(formatStr) { + return -1, nil, errors.New("could not find end of precision specifier") + } + if !isASCIIDigit(rune(formatStr[i])) { + break + } + buffer.WriteByte(formatStr[i]) + i++ + } + precision, err := strconv.Atoi(buffer.String()) + if err != nil { + return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err) + } + return i, &precision, nil +} + +func isASCIIDigit(r rune) bool { + return r <= unicode.MaxASCII && unicode.IsDigit(r) +} + +type parseFormatError struct { + msg string + wrapped error +} + +func newParseFormatError(msg string, wrapped error) error { + return parseFormatError{msg: msg, wrapped: wrapped} +} + +func (e parseFormatError) Error() string { + return fmt.Sprintf("%s: %s", e.msg, e.wrapped.Error()) +} + +func (e parseFormatError) Is(target error) bool { + return e.Error() == target.Error() +} + +func (e parseFormatError) Unwrap() error { + return e.wrapped +} + +const ( + runtimeID = int64(-1) +) diff --git a/vendor/github.com/google/cel-go/ext/guards.go b/vendor/github.com/google/cel-go/ext/guards.go index 785c8675bb..2c00bfe3a8 100644 --- a/vendor/github.com/google/cel-go/ext/guards.go +++ b/vendor/github.com/google/cel-go/ext/guards.go @@ -15,10 +15,9 @@ package ext import ( + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // function invocation guards for common call signatures within extension functions. 
@@ -51,10 +50,10 @@ func listStringOrError(strs []string, err error) ref.Val { return types.DefaultTypeAdapter.NativeToValue(strs) } -func macroTargetMatchesNamespace(ns string, target *exprpb.Expr) bool { - switch target.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - if target.GetIdentExpr().GetName() != ns { +func macroTargetMatchesNamespace(ns string, target ast.Expr) bool { + switch target.Kind() { + case ast.IdentKind: + if target.AsIdent() != ns { return false } return true diff --git a/vendor/github.com/google/cel-go/ext/math.go b/vendor/github.com/google/cel-go/ext/math.go index 0b9a361031..893654e7fb 100644 --- a/vendor/github.com/google/cel-go/ext/math.go +++ b/vendor/github.com/google/cel-go/ext/math.go @@ -16,14 +16,14 @@ package ext import ( "fmt" + "math" "strings" "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // Math returns a cel.EnvOption to configure namespaced math helper macros and @@ -87,33 +87,311 @@ import ( // math.least('string') // parse error // math.least(a, b) // check-time error if a or b is non-numeric // math.least(dyn('string')) // runtime error +// +// # Math.BitOr +// +// Introduced at version: 1 +// +// Performs a bitwise-OR operation over two int or uint values. +// +// math.bitOr(, ) -> +// math.bitOr(, ) -> +// +// Examples: +// +// math.bitOr(1u, 2u) // returns 3u +// math.bitOr(-2, -4) // returns -2 +// +// # Math.BitAnd +// +// Introduced at version: 1 +// +// Performs a bitwise-AND operation over two int or uint values. +// +// math.bitAnd(, ) -> +// math.bitAnd(, ) -> +// +// Examples: +// +// math.bitAnd(3u, 2u) // return 2u +// math.bitAnd(3, 5) // returns 3 +// math.bitAnd(-3, -5) // returns -7 +// +// # Math.BitXor +// +// Introduced at version: 1 +// +// math.bitXor(, ) -> +// math.bitXor(, ) -> +// +// Performs a bitwise-XOR operation over two int or uint values. +// +// Examples: +// +// math.bitXor(3u, 5u) // returns 6u +// math.bitXor(1, 3) // returns 2 +// +// # Math.BitNot +// +// Introduced at version: 1 +// +// Function which accepts a single int or uint and performs a bitwise-NOT +// ones-complement of the given binary value. +// +// math.bitNot() -> +// math.bitNot() -> +// +// Examples +// +// math.bitNot(1) // returns -1 +// math.bitNot(-1) // return 0 +// math.bitNot(0u) // returns 18446744073709551615u +// +// # Math.BitShiftLeft +// +// Introduced at version: 1 +// +// Perform a left shift of bits on the first parameter, by the amount of bits +// specified in the second parameter. The first parameter is either a uint or +// an int. The second parameter must be an int. +// +// When the second parameter is 64 or greater, 0 will be always be returned +// since the number of bits shifted is greater than or equal to the total bit +// length of the number being shifted. Negative valued bit shifts will result +// in a runtime error. +// +// math.bitShiftLeft(, ) -> +// math.bitShiftLeft(, ) -> +// +// Examples +// +// math.bitShiftLeft(1, 2) // returns 4 +// math.bitShiftLeft(-1, 2) // returns -4 +// math.bitShiftLeft(1u, 2) // return 4u +// math.bitShiftLeft(1u, 200) // returns 0u +// +// # Math.BitShiftRight +// +// Introduced at version: 1 +// +// Perform a right shift of bits on the first parameter, by the amount of bits +// specified in the second parameter. 
The first parameter is either a uint or +// an int. The second parameter must be an int. +// +// When the second parameter is 64 or greater, 0 will always be returned since +// the number of bits shifted is greater than or equal to the total bit length +// of the number being shifted. Negative valued bit shifts will result in a +// runtime error. +// +// The sign bit extension will not be preserved for this operation: vacant bits +// on the left are filled with 0. +// +// math.bitShiftRight(<int>, <int>) -> <int> +// math.bitShiftRight(<uint>, <int>) -> <uint> +// +// Examples +// +// math.bitShiftRight(1024, 2) // returns 256 +// math.bitShiftRight(1024u, 2) // returns 256u +// math.bitShiftRight(1024u, 64) // returns 0u +// +// # Math.Ceil +// +// Introduced at version: 1 +// +// Compute the ceiling of a double value. +// +// math.ceil(<double>) -> <double> +// +// Examples: +// +// math.ceil(1.2) // returns 2.0 +// math.ceil(-1.2) // returns -1.0 +// +// # Math.Floor +// +// Introduced at version: 1 +// +// Compute the floor of a double value. +// +// math.floor(<double>) -> <double> +// +// Examples: +// +// math.floor(1.2) // returns 1.0 +// math.floor(-1.2) // returns -2.0 +// +// # Math.Round +// +// Introduced at version: 1 +// +// Rounds the double value to the nearest whole number with ties rounding away +// from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0. +// +// math.round(<double>) -> <double> +// +// Examples: +// +// math.round(1.2) // returns 1.0 +// math.round(1.5) // returns 2.0 +// math.round(-1.5) // returns -2.0 +// +// # Math.Trunc +// +// Introduced at version: 1 +// +// Truncates the fractional portion of the double value. +// +// math.trunc(<double>) -> <double> +// +// Examples: +// +// math.trunc(-1.3) // returns -1.0 +// math.trunc(1.3) // returns 1.0 +// +// # Math.Abs +// +// Introduced at version: 1 +// +// Returns the absolute value of the numeric type provided as input. If the +// value is NaN, the output is NaN. If the input is int64 min, the function +// will result in an overflow error. +// +// math.abs(<double>) -> <double> +// math.abs(<int>) -> <int> +// math.abs(<uint>) -> <uint> +// +// Examples: +// +// math.abs(-1) // returns 1 +// math.abs(1) // returns 1 +// math.abs(-9223372036854775808) // overflow error +// +// # Math.Sign +// +// Introduced at version: 1 +// +// Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or +// uint depending on the overload. For floating point values, if NaN is +// provided as input, the output is also NaN. The implementation does not +// differentiate between positive and negative zero. +// +// math.sign(<double>) -> <double> +// math.sign(<int>) -> <int> +// math.sign(<uint>) -> <uint> +// +// Examples: +// +// math.sign(-42) // returns -1 +// math.sign(0) // returns 0 +// math.sign(42) // returns 1 +// +// # Math.IsInf +// +// Introduced at version: 1 +// +// Returns true if the input double value is -Inf or +Inf. +// +// math.isInf(<double>) -> <bool> +// +// Examples: +// +// math.isInf(1.0/0.0) // returns true +// math.isInf(1.2) // returns false +// +// # Math.IsNaN +// +// Introduced at version: 1 +// +// Returns true if the input double value is NaN, false otherwise. +// +// math.isNaN(<double>) -> <bool> +// +// Examples: +// +// math.isNaN(0.0/0.0) // returns true +// math.isNaN(1.2) // returns false +// +// # Math.IsFinite +// +// Introduced at version: 1 +// +// Returns true if the value is a finite number.
Equivalent in behavior to: +// !math.isNaN(double) && !math.isInf(double) +// +// math.isFinite(<double>) -> <bool> +// +// Examples: +// +// math.isFinite(0.0/0.0) // returns false +// math.isFinite(1.2) // returns true func Math() cel.EnvOption { - return cel.Lib(mathLib{}) + return cel.Lib(&mathLib{version: math.MaxUint32}) } const ( mathNamespace = "math" leastMacro = "least" greatestMacro = "greatest" - minFunc = "math.@min" - maxFunc = "math.@max" + + // Min-max functions + minFunc = "math.@min" + maxFunc = "math.@max" + + // Rounding functions + ceilFunc = "math.ceil" + floorFunc = "math.floor" + roundFunc = "math.round" + truncFunc = "math.trunc" + + // Floating point helper functions + isInfFunc = "math.isInf" + isNanFunc = "math.isNaN" + isFiniteFunc = "math.isFinite" + + // Signedness functions + absFunc = "math.abs" + signFunc = "math.sign" + + // Bitwise functions + bitAndFunc = "math.bitAnd" + bitOrFunc = "math.bitOr" + bitXorFunc = "math.bitXor" + bitNotFunc = "math.bitNot" + bitShiftLeftFunc = "math.bitShiftLeft" + bitShiftRightFunc = "math.bitShiftRight" ) -type mathLib struct{} +var ( + errIntOverflow = types.NewErr("integer overflow") +) + +type MathOption func(*mathLib) *mathLib + +func MathVersion(version uint32) MathOption { + return func(lib *mathLib) *mathLib { + lib.version = version + return lib + } +} + +type mathLib struct { + version uint32 +} // LibraryName implements the SingletonLibrary interface method. -func (mathLib) LibraryName() string { +func (*mathLib) LibraryName() string { return "cel.lib.ext.math" } // CompileOptions implements the Library interface method. -func (mathLib) CompileOptions() []cel.EnvOption { - return []cel.EnvOption{ +func (lib *mathLib) CompileOptions() []cel.EnvOption { + opts := []cel.EnvOption{ cel.Macros( // math.least(num, ...) - cel.NewReceiverVarArgMacro(leastMacro, mathLeast), + cel.ReceiverVarArgMacro(leastMacro, mathLeast), // math.greatest(num, ...)
- cel.NewReceiverVarArgMacro(greatestMacro, mathGreatest), + cel.ReceiverVarArgMacro(greatestMacro, mathGreatest), ), cel.Function(minFunc, cel.Overload("math_@min_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, @@ -180,64 +458,149 @@ func (mathLib) CompileOptions() []cel.EnvOption { cel.UnaryBinding(maxList)), ), } + if lib.version >= 1 { + opts = append(opts, + // Rounding function declarations + cel.Function(ceilFunc, + cel.Overload("math_ceil_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(ceil))), + cel.Function(floorFunc, + cel.Overload("math_floor_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(floor))), + cel.Function(roundFunc, + cel.Overload("math_round_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(round))), + cel.Function(truncFunc, + cel.Overload("math_trunc_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(trunc))), + + // Floating point helpers + cel.Function(isInfFunc, + cel.Overload("math_isInf_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isInf))), + cel.Function(isNanFunc, + cel.Overload("math_isNaN_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isNaN))), + cel.Function(isFiniteFunc, + cel.Overload("math_isFinite_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isFinite))), + + // Signedness functions + cel.Function(absFunc, + cel.Overload("math_abs_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(absDouble)), + cel.Overload("math_abs_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(absInt)), + cel.Overload("math_abs_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(identity)), + ), + cel.Function(signFunc, + cel.Overload("math_sign_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(sign)), + cel.Overload("math_sign_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(sign)), + cel.Overload("math_sign_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(sign)), + ), + + // Bitwise operator declarations + cel.Function(bitAndFunc, + cel.Overload("math_bitAnd_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitAndPairInt)), + cel.Overload("math_bitAnd_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitAndPairUint)), + ), + cel.Function(bitOrFunc, + cel.Overload("math_bitOr_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitOrPairInt)), + cel.Overload("math_bitOr_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitOrPairUint)), + ), + cel.Function(bitXorFunc, + cel.Overload("math_bitXor_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitXorPairInt)), + cel.Overload("math_bitXor_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitXorPairUint)), + ), + cel.Function(bitNotFunc, + cel.Overload("math_bitNot_int_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(bitNotInt)), + cel.Overload("math_bitNot_uint_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(bitNotUint)), + ), + cel.Function(bitShiftLeftFunc, + cel.Overload("math_bitShiftLeft_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitShiftLeftIntInt)), + cel.Overload("math_bitShiftLeft_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType, + 
cel.BinaryBinding(bitShiftLeftUintInt)), + ), + cel.Function(bitShiftRightFunc, + cel.Overload("math_bitShiftRight_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitShiftRightIntInt)), + cel.Overload("math_bitShiftRight_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType, + cel.BinaryBinding(bitShiftRightUintInt)), + ), + ) + } + return opts } // ProgramOptions implements the Library interface method. -func (mathLib) ProgramOptions() []cel.ProgramOption { +func (*mathLib) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } -func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func mathLeast(meh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { if !macroTargetMatchesNamespace(mathNamespace, target) { return nil, nil } switch len(args) { case 0: - return nil, meh.NewError(target.GetId(), "math.least() requires at least one argument") + return nil, meh.NewError(target.ID(), "math.least() requires at least one argument") case 1: - if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { - return meh.GlobalCall(minFunc, args[0]), nil + if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) { + return meh.NewCall(minFunc, args[0]), nil } - return nil, meh.NewError(args[0].GetId(), "math.least() invalid single argument value") + return nil, meh.NewError(args[0].ID(), "math.least() invalid single argument value") case 2: err := checkInvalidArgs(meh, "math.least()", args) if err != nil { return nil, err } - return meh.GlobalCall(minFunc, args...), nil + return meh.NewCall(minFunc, args...), nil default: err := checkInvalidArgs(meh, "math.least()", args) if err != nil { return nil, err } - return meh.GlobalCall(minFunc, meh.NewList(args...)), nil + return meh.NewCall(minFunc, meh.NewList(args...)), nil } } -func mathGreatest(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func mathGreatest(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { if !macroTargetMatchesNamespace(mathNamespace, target) { return nil, nil } switch len(args) { case 0: - return nil, meh.NewError(target.GetId(), "math.greatest() requires at least one argument") + return nil, mef.NewError(target.ID(), "math.greatest() requires at least one argument") case 1: - if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { - return meh.GlobalCall(maxFunc, args[0]), nil + if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) { + return mef.NewCall(maxFunc, args[0]), nil } - return nil, meh.NewError(args[0].GetId(), "math.greatest() invalid single argument value") + return nil, mef.NewError(args[0].ID(), "math.greatest() invalid single argument value") case 2: - err := checkInvalidArgs(meh, "math.greatest()", args) + err := checkInvalidArgs(mef, "math.greatest()", args) if err != nil { return nil, err } - return meh.GlobalCall(maxFunc, args...), nil + return mef.NewCall(maxFunc, args...), nil default: - err := checkInvalidArgs(meh, "math.greatest()", args) + err := checkInvalidArgs(mef, "math.greatest()", args) if err != nil { return nil, err } - return meh.GlobalCall(maxFunc, meh.NewList(args...)), nil + return mef.NewCall(maxFunc, mef.NewList(args...)), nil } } @@ -245,6 +608,165 @@ func identity(val ref.Val) ref.Val { return val } +func ceil(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Ceil(float64(v))) +} + +func 
floor(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Floor(float64(v))) +} + +func round(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Round(float64(v))) +} + +func trunc(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Trunc(float64(v))) +} + +func isInf(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Bool(math.IsInf(float64(v), 0)) +} + +func isFinite(val ref.Val) ref.Val { + v := float64(val.(types.Double)) + return types.Bool(!math.IsInf(v, 0) && !math.IsNaN(v)) +} + +func isNaN(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Bool(math.IsNaN(float64(v))) +} + +func absDouble(val ref.Val) ref.Val { + v := float64(val.(types.Double)) + return types.Double(math.Abs(v)) +} + +func absInt(val ref.Val) ref.Val { + v := int64(val.(types.Int)) + if v == math.MinInt64 { + return errIntOverflow + } + if v >= 0 { + return val + } + return -types.Int(v) +} + +func sign(val ref.Val) ref.Val { + switch v := val.(type) { + case types.Double: + if isNaN(v) == types.True { + return v + } + zero := types.Double(0) + if v > zero { + return types.Double(1) + } + if v < zero { + return types.Double(-1) + } + return zero + case types.Int: + return v.Compare(types.IntZero) + case types.Uint: + if v == types.Uint(0) { + return types.Uint(0) + } + return types.Uint(1) + default: + return maybeSuffixError(val, "math.sign") + } +} + +func bitAndPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l & r +} + +func bitAndPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l & r +} + +func bitOrPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l | r +} + +func bitOrPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l | r +} + +func bitXorPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l ^ r +} + +func bitXorPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l ^ r +} + +func bitNotInt(value ref.Val) ref.Val { + v := value.(types.Int) + return ^v +} + +func bitNotUint(value ref.Val) ref.Val { + v := value.(types.Uint) + return ^v +} + +func bitShiftLeftIntInt(value, bits ref.Val) ref.Val { + v := value.(types.Int) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftLeft() negative offset: %d", bs) + } + return v << bs +} + +func bitShiftLeftUintInt(value, bits ref.Val) ref.Val { + v := value.(types.Uint) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftLeft() negative offset: %d", bs) + } + return v << bs +} + +func bitShiftRightIntInt(value, bits ref.Val) ref.Val { + v := value.(types.Int) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftRight() negative offset: %d", bs) + } + return types.Int(types.Uint(v) >> bs) +} + +func bitShiftRightUintInt(value, bits ref.Val) ref.Val { + v := value.(types.Uint) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftRight() negative offset: %d", bs) + } + return v >> bs +} + func minPair(first, second ref.Val) ref.Val { cmp, ok := first.(traits.Comparer) if !ok { @@ -311,49 +833,49 @@ func maxList(numList ref.Val) ref.Val { } } -func checkInvalidArgs(meh cel.MacroExprHelper, funcName string, args []*exprpb.Expr) 
*cel.Error { +func checkInvalidArgs(meh cel.MacroExprFactory, funcName string, args []ast.Expr) *cel.Error { for _, arg := range args { err := checkInvalidArgLiteral(funcName, arg) if err != nil { - return meh.NewError(arg.GetId(), err.Error()) + return meh.NewError(arg.ID(), err.Error()) } } return nil } -func checkInvalidArgLiteral(funcName string, arg *exprpb.Expr) error { - if !isValidArgType(arg) { +func checkInvalidArgLiteral(funcName string, arg ast.Expr) error { + if !isNumericArgType(arg) { return fmt.Errorf("%s simple literal arguments must be numeric", funcName) } return nil } -func isValidArgType(arg *exprpb.Expr) bool { - switch arg.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - c := arg.GetConstExpr() - switch c.GetConstantKind().(type) { - case *exprpb.Constant_DoubleValue, *exprpb.Constant_Int64Value, *exprpb.Constant_Uint64Value: +func isNumericArgType(arg ast.Expr) bool { + switch arg.Kind() { + case ast.LiteralKind: + c := ref.Val(arg.AsLiteral()) + switch c.(type) { + case types.Double, types.Int, types.Uint: return true default: return false } - case *exprpb.Expr_ListExpr, *exprpb.Expr_StructExpr: + case ast.ListKind, ast.MapKind, ast.StructKind: return false default: return true } } -func isListLiteralWithValidArgs(arg *exprpb.Expr) bool { - switch arg.GetExprKind().(type) { - case *exprpb.Expr_ListExpr: - list := arg.GetListExpr() - if len(list.GetElements()) == 0 { +func isListLiteralWithNumericArgs(arg ast.Expr) bool { + switch arg.Kind() { + case ast.ListKind: + list := arg.AsList() + if list.Size() == 0 { return false } - for _, e := range list.GetElements() { - if !isValidArgType(e) { + for _, e := range list.Elements() { + if !isNumericArgType(e) { return false } } diff --git a/vendor/github.com/google/cel-go/ext/native.go b/vendor/github.com/google/cel-go/ext/native.go index 0b5fc38ca9..35b0f1e390 100644 --- a/vendor/github.com/google/cel-go/ext/native.go +++ b/vendor/github.com/google/cel-go/ext/native.go @@ -15,6 +15,7 @@ package ext import ( + "errors" "fmt" "reflect" "strings" @@ -77,12 +78,45 @@ var ( // same advice holds if you are using custom type adapters and type providers. The native type // provider composes over whichever type adapter and provider is configured in the cel.Env at // the time that it is invoked. -func NativeTypes(refTypes ...any) cel.EnvOption { +// +// There is also the possibility to rename the fields of native structs by setting the `cel` tag +// for fields you want to override. In order to enable this feature, pass in the `EnableStructTag` +// option. Here is an example to see it in action: +// +// ```go +// package identity +// +// type Account struct { +// ID int +// OwnerName string `cel:"owner"` +// } +// +// ``` +// +// The `OwnerName` field is now accessible in CEL via `owner`, e.g. `identity.Account{owner: 'bob'}`. +// In case there are duplicated field names in the struct, an error will be returned. +func NativeTypes(args ...any) cel.EnvOption { return func(env *cel.Env) (*cel.Env, error) { - tp, err := newNativeTypeProvider(env.CELTypeAdapter(), env.CELTypeProvider(), refTypes...) + nativeTypes := make([]any, 0, len(args)) + tpOptions := nativeTypeOptions{} + + for _, v := range args { + switch v := v.(type) { + case NativeTypesOption: + err := v(&tpOptions) + if err != nil { + return nil, err + } + default: + nativeTypes = append(nativeTypes, v) + } + } + + tp, err := newNativeTypeProvider(tpOptions, env.CELTypeAdapter(), env.CELTypeProvider(), nativeTypes...) 
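// Editor's sketch (not part of the vendored file): how the struct-tag support
// configured here might be used from an application, assuming the cel, ext,
// and reflect imports; the Account type and the expression are illustrative only.
//
//	type Account struct {
//		ID        int
//		OwnerName string `cel:"owner"`
//	}
//
//	env, err := cel.NewEnv(
//		ext.NativeTypes(reflect.TypeOf(Account{}), ext.ParseStructTags(true)),
//	)
//	if err != nil {
//		// handle the configuration error
//	}
//	// With tag parsing enabled the field is addressed as `owner`, e.g. for a
//	// type defined in package main:
//	ast, iss := env.Compile(`main.Account{owner: 'bob'}.owner == 'bob'`)
//	_, _ = ast, iss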
if err != nil { return nil, err } + env, err = cel.CustomTypeAdapter(tp)(env) if err != nil { return nil, err @@ -91,22 +125,43 @@ func NativeTypes(refTypes ...any) cel.EnvOption { } } -func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) { +// NativeTypesOption is a functional interface for configuring handling of native types. +type NativeTypesOption func(*nativeTypeOptions) error + +type nativeTypeOptions struct { + // parseStructTags controls if CEL should support struct field renames, by parsing + // struct field tags. + parseStructTags bool +} + +// ParseStructTags configures if native types field names should be overridable by CEL struct tags. +func ParseStructTags(enabled bool) NativeTypesOption { + return func(ntp *nativeTypeOptions) error { + ntp.parseStructTags = true + return nil + } +} + +func newNativeTypeProvider(tpOptions nativeTypeOptions, adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) { nativeTypes := make(map[string]*nativeType, len(refTypes)) for _, refType := range refTypes { switch rt := refType.(type) { case reflect.Type: - t, err := newNativeType(rt) + result, err := newNativeTypes(tpOptions.parseStructTags, rt) if err != nil { return nil, err } - nativeTypes[t.TypeName()] = t + for idx := range result { + nativeTypes[result[idx].TypeName()] = result[idx] + } case reflect.Value: - t, err := newNativeType(rt.Type()) + result, err := newNativeTypes(tpOptions.parseStructTags, rt.Type()) if err != nil { return nil, err } - nativeTypes[t.TypeName()] = t + for idx := range result { + nativeTypes[result[idx].TypeName()] = result[idx] + } default: return nil, fmt.Errorf("unsupported native type: %v (%T) must be reflect.Type or reflect.Value", rt, rt) } @@ -115,6 +170,7 @@ func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTy nativeTypes: nativeTypes, baseAdapter: adapter, baseProvider: provider, + options: tpOptions, }, nil } @@ -122,6 +178,7 @@ type nativeTypeProvider struct { nativeTypes map[string]*nativeType baseAdapter types.Adapter baseProvider types.Provider + options nativeTypeOptions } // EnumValue proxies to the types.Provider configured at the times the NativeTypes @@ -151,6 +208,52 @@ func (tp *nativeTypeProvider) FindStructType(typeName string) (*types.Type, bool return tp.baseProvider.FindStructType(typeName) } +func toFieldName(parseStructTag bool, f reflect.StructField) string { + if !parseStructTag { + return f.Name + } + + if name, found := f.Tag.Lookup("cel"); found { + return name + } + + return f.Name +} + +// FindStructFieldNames looks up the type definition first from the native types, then from +// the backing provider type set. If found, a set of field names corresponding to the type +// will be returned. +func (tp *nativeTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) { + if t, found := tp.nativeTypes[typeName]; found { + fieldCount := t.refType.NumField() + fields := make([]string, fieldCount) + for i := 0; i < fieldCount; i++ { + fields[i] = toFieldName(tp.options.parseStructTags, t.refType.Field(i)) + } + return fields, true + } + if celTypeFields, found := tp.baseProvider.FindStructFieldNames(typeName); found { + return celTypeFields, true + } + return tp.baseProvider.FindStructFieldNames(typeName) +} + +// valueFieldByName retrieves the corresponding reflect.Value field for the given field name, by +// searching for a matching field tag value or field name. 
+func valueFieldByName(parseStructTags bool, target reflect.Value, fieldName string) reflect.Value { + if !parseStructTags { + return target.FieldByName(fieldName) + } + + for i := 0; i < target.Type().NumField(); i++ { + f := target.Type().Field(i) + if toFieldName(parseStructTags, f) == fieldName { + return target.FieldByIndex(f.Index) + } + } + return reflect.Value{} +} + // FindStructFieldType looks up a native type's field definition, and if the type name is not a native // type then proxies to the composed types.Provider func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) { @@ -170,13 +273,13 @@ func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (* Type: celType, IsSet: func(obj any) bool { refVal := reflect.Indirect(reflect.ValueOf(obj)) - refField := refVal.FieldByName(fieldName) + refField := valueFieldByName(tp.options.parseStructTags, refVal, fieldName) return !refField.IsZero() }, GetFrom: func(obj any) (any, error) { refVal := reflect.Indirect(reflect.ValueOf(obj)) - refField := refVal.FieldByName(fieldName) - return getFieldValue(tp, refField), nil + refField := valueFieldByName(tp.options.parseStructTags, refVal, fieldName) + return getFieldValue(refField), nil }, }, true } @@ -227,6 +330,9 @@ func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val { case []byte: return tp.baseAdapter.NativeToValue(val) default: + if refVal.Type().Elem() == reflect.TypeOf(byte(0)) { + return tp.baseAdapter.NativeToValue(val) + } return types.NewDynamicList(tp, val) } case reflect.Map: @@ -237,7 +343,7 @@ func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val { time.Time: return tp.baseAdapter.NativeToValue(val) default: - return newNativeObject(tp, val, rawVal) + return tp.newNativeObject(val, rawVal) } default: return tp.baseAdapter.NativeToValue(val) @@ -297,13 +403,13 @@ func convertToCelType(refType reflect.Type) (*cel.Type, bool) { return nil, false } -func newNativeObject(adapter types.Adapter, val any, refValue reflect.Value) ref.Val { - valType, err := newNativeType(refValue.Type()) +func (tp *nativeTypeProvider) newNativeObject(val any, refValue reflect.Value) ref.Val { + valType, err := newNativeType(tp.options.parseStructTags, refValue.Type()) if err != nil { return types.NewErr(err.Error()) } return &nativeObj{ - Adapter: adapter, + Adapter: tp, val: val, valType: valType, refValue: refValue, @@ -350,12 +456,13 @@ func (o *nativeObj) ConvertToNative(typeDesc reflect.Type) (any, error) { if !fieldValue.IsValid() || fieldValue.IsZero() { continue } + fieldName := toFieldName(o.valType.parseStructTags, fieldType) fieldCELVal := o.NativeToValue(fieldValue.Interface()) fieldJSONVal, err := fieldCELVal.ConvertToNative(jsonValueType) if err != nil { return nil, err } - fields[fieldType.Name] = fieldJSONVal.(*structpb.Value) + fields[fieldName] = fieldJSONVal.(*structpb.Value) } return &structpb.Struct{Fields: fields}, nil } @@ -447,7 +554,47 @@ func (o *nativeObj) Value() any { return o.val } -func newNativeType(rawType reflect.Type) (*nativeType, error) { +func newNativeTypes(parseStructTags bool, rawType reflect.Type) ([]*nativeType, error) { + nt, err := newNativeType(parseStructTags, rawType) + if err != nil { + return nil, err + } + result := []*nativeType{nt} + + alreadySeen := make(map[string]struct{}) + var iterateStructMembers func(reflect.Type) + iterateStructMembers = func(t reflect.Type) { + if k := t.Kind(); k == reflect.Pointer || k == reflect.Slice || k == reflect.Array || k == 
reflect.Map { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return + } + if _, seen := alreadySeen[t.String()]; seen { + return + } + alreadySeen[t.String()] = struct{}{} + nt, ntErr := newNativeType(parseStructTags, t) + if ntErr != nil { + err = ntErr + return + } + result = append(result, nt) + + for idx := 0; idx < t.NumField(); idx++ { + iterateStructMembers(t.Field(idx).Type) + } + } + iterateStructMembers(rawType) + + return result, err +} + +var ( + errDuplicatedFieldName = errors.New("field name already exists in struct") +) + +func newNativeType(parseStructTags bool, rawType reflect.Type) (*nativeType, error) { refType := rawType if refType.Kind() == reflect.Pointer { refType = refType.Elem() @@ -455,15 +602,34 @@ func newNativeType(rawType reflect.Type) (*nativeType, error) { if !isValidObjectType(refType) { return nil, fmt.Errorf("unsupported reflect.Type %v, must be reflect.Struct", rawType) } + + // Since naming collisions can only happen with struct tag parsing, we only check for them if it is enabled. + if parseStructTags { + fieldNames := make(map[string]struct{}) + + for idx := 0; idx < refType.NumField(); idx++ { + field := refType.Field(idx) + fieldName := toFieldName(parseStructTags, field) + + if _, found := fieldNames[fieldName]; found { + return nil, fmt.Errorf("invalid field name `%s` in struct `%s`: %w", fieldName, refType.Name(), errDuplicatedFieldName) + } else { + fieldNames[fieldName] = struct{}{} + } + } + } + return &nativeType{ - typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), - refType: refType, + typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), + refType: refType, + parseStructTags: parseStructTags, }, nil } type nativeType struct { - typeName string - refType reflect.Type + typeName string + refType reflect.Type + parseStructTags bool } // ConvertToNative implements ref.Val.ConvertToNative. @@ -511,9 +677,26 @@ func (t *nativeType) Value() any { return t.typeName } +// fieldByName returns the corresponding reflect.StructField for the give name either by matching +// field tag or field name. 
+func (t *nativeType) fieldByName(fieldName string) (reflect.StructField, bool) { + if !t.parseStructTags { + return t.refType.FieldByName(fieldName) + } + + for i := 0; i < t.refType.NumField(); i++ { + f := t.refType.Field(i) + if toFieldName(t.parseStructTags, f) == fieldName { + return f, true + } + } + + return reflect.StructField{}, false +} + // hasField returns whether a field name has a corresponding Golang reflect.StructField func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) { - f, found := t.refType.FieldByName(fieldName) + f, found := t.fieldByName(fieldName) if !found || !f.IsExported() || !isSupportedType(f.Type) { return reflect.StructField{}, false } @@ -521,21 +704,16 @@ func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) { } func adaptFieldValue(adapter types.Adapter, refField reflect.Value) ref.Val { - return adapter.NativeToValue(getFieldValue(adapter, refField)) + return adapter.NativeToValue(getFieldValue(refField)) } -func getFieldValue(adapter types.Adapter, refField reflect.Value) any { +func getFieldValue(refField reflect.Value) any { if refField.IsZero() { switch refField.Kind() { - case reflect.Array, reflect.Slice: - return types.NewDynamicList(adapter, []ref.Val{}) - case reflect.Map: - return types.NewDynamicMap(adapter, map[ref.Val]ref.Val{}) case reflect.Struct: if refField.Type() == timestampType { - return types.Timestamp{Time: time.Unix(0, 0)} + return time.Unix(0, 0) } - return reflect.New(refField.Type()).Elem().Interface() case reflect.Pointer: return reflect.New(refField.Type().Elem()).Interface() } diff --git a/vendor/github.com/google/cel-go/ext/protos.go b/vendor/github.com/google/cel-go/ext/protos.go index a7ca27a6a2..68796f60ad 100644 --- a/vendor/github.com/google/cel-go/ext/protos.go +++ b/vendor/github.com/google/cel-go/ext/protos.go @@ -16,8 +16,7 @@ package ext import ( "github.com/google/cel-go/cel" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/ast" ) // Protos returns a cel.EnvOption to configure extended macros and functions for @@ -72,9 +71,9 @@ func (protoLib) CompileOptions() []cel.EnvOption { return []cel.EnvOption{ cel.Macros( // proto.getExt(msg, select_expression) - cel.NewReceiverMacro(getExtension, 2, getProtoExt), + cel.ReceiverMacro(getExtension, 2, getProtoExt), // proto.hasExt(msg, select_expression) - cel.NewReceiverMacro(hasExtension, 2, hasProtoExt), + cel.ReceiverMacro(hasExtension, 2, hasProtoExt), ), } } @@ -85,56 +84,56 @@ func (protoLib) ProgramOptions() []cel.ProgramOption { } // hasProtoExt generates a test-only select expression for a fully-qualified extension name on a protobuf message. -func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func hasProtoExt(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { if !macroTargetMatchesNamespace(protoNamespace, target) { return nil, nil } - extensionField, err := getExtFieldName(meh, args[1]) + extensionField, err := getExtFieldName(mef, args[1]) if err != nil { return nil, err } - return meh.PresenceTest(args[0], extensionField), nil + return mef.NewPresenceTest(args[0], extensionField), nil } // getProtoExt generates a select expression for a fully-qualified extension name on a protobuf message. 
-func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func getProtoExt(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { if !macroTargetMatchesNamespace(protoNamespace, target) { return nil, nil } - extFieldName, err := getExtFieldName(meh, args[1]) + extFieldName, err := getExtFieldName(mef, args[1]) if err != nil { return nil, err } - return meh.Select(args[0], extFieldName), nil + return mef.NewSelect(args[0], extFieldName), nil } -func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *cel.Error) { +func getExtFieldName(mef cel.MacroExprFactory, expr ast.Expr) (string, *cel.Error) { isValid := false extensionField := "" - switch expr.GetExprKind().(type) { - case *exprpb.Expr_SelectExpr: + switch expr.Kind() { + case ast.SelectKind: extensionField, isValid = validateIdentifier(expr) } if !isValid { - return "", meh.NewError(expr.GetId(), "invalid extension field") + return "", mef.NewError(expr.ID(), "invalid extension field") } return extensionField, nil } -func validateIdentifier(expr *exprpb.Expr) (string, bool) { - switch expr.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - return expr.GetIdentExpr().GetName(), true - case *exprpb.Expr_SelectExpr: - sel := expr.GetSelectExpr() - if sel.GetTestOnly() { +func validateIdentifier(expr ast.Expr) (string, bool) { + switch expr.Kind() { + case ast.IdentKind: + return expr.AsIdent(), true + case ast.SelectKind: + sel := expr.AsSelect() + if sel.IsTestOnly() { return "", false } - opStr, isIdent := validateIdentifier(sel.GetOperand()) + opStr, isIdent := validateIdentifier(sel.Operand()) if !isIdent { return "", false } - return opStr + "." + sel.GetField(), true + return opStr + "." + sel.FieldName(), true default: return "", false } diff --git a/vendor/github.com/google/cel-go/ext/sets.go b/vendor/github.com/google/cel-go/ext/sets.go index 833c15f616..7e94166552 100644 --- a/vendor/github.com/google/cel-go/ext/sets.go +++ b/vendor/github.com/google/cel-go/ext/sets.go @@ -19,6 +19,8 @@ import ( "github.com/google/cel-go/cel" "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" @@ -119,6 +121,68 @@ func (setsLib) ProgramOptions() []cel.ProgramOption { } } +// NewSetMembershipOptimizer rewrites set membership tests using the `in` operator against a list +// of constant values of enum, int, uint, string, or boolean type into a set membership test against +// a map where the map keys are the elements of the list. 
+func NewSetMembershipOptimizer() (cel.ASTOptimizer, error) { + return setsLib{}, nil +} + +func (setsLib) Optimize(ctx *cel.OptimizerContext, a *ast.AST) *ast.AST { + root := ast.NavigateAST(a) + matches := ast.MatchDescendants(root, matchInConstantList(a)) + for _, match := range matches { + call := match.AsCall() + listArg := call.Args()[1] + entries := make([]ast.EntryExpr, len(listArg.AsList().Elements())) + for i, elem := range listArg.AsList().Elements() { + var entry ast.EntryExpr + if r, found := a.ReferenceMap()[elem.ID()]; found && r.Value != nil { + entry = ctx.NewMapEntry(ctx.NewLiteral(r.Value), ctx.NewLiteral(types.True), false) + } else { + entry = ctx.NewMapEntry(elem, ctx.NewLiteral(types.True), false) + } + entries[i] = entry + } + mapArg := ctx.NewMap(entries) + ctx.UpdateExpr(listArg, mapArg) + } + return a +} + +func matchInConstantList(a *ast.AST) ast.ExprMatcher { + return func(e ast.NavigableExpr) bool { + if e.Kind() != ast.CallKind { + return false + } + call := e.AsCall() + if call.FunctionName() != operators.In { + return false + } + aggregateVal := call.Args()[1] + if aggregateVal.Kind() != ast.ListKind { + return false + } + listVal := aggregateVal.AsList() + for _, elem := range listVal.Elements() { + if r, found := a.ReferenceMap()[elem.ID()]; found { + if r.Value != nil { + continue + } + } + if elem.Kind() != ast.LiteralKind { + return false + } + lit := elem.AsLiteral() + if !(lit.Type() == cel.StringType || lit.Type() == cel.IntType || + lit.Type() == cel.UintType || lit.Type() == cel.BoolType) { + return false + } + } + return true + } +} + func setsIntersects(listA, listB ref.Val) ref.Val { lA := listA.(traits.Lister) lB := listB.(traits.Lister) diff --git a/vendor/github.com/google/cel-go/ext/strings.go b/vendor/github.com/google/cel-go/ext/strings.go index 88c119f2b0..2e20f1e4c4 100644 --- a/vendor/github.com/google/cel-go/ext/strings.go +++ b/vendor/github.com/google/cel-go/ext/strings.go @@ -21,19 +21,16 @@ import ( "fmt" "math" "reflect" - "sort" "strings" "unicode" "unicode/utf8" "golang.org/x/text/language" - "golang.org/x/text/message" "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - "github.com/google/cel-go/interpreter" ) const ( @@ -99,7 +96,7 @@ const ( // "a map inside a list: %s".format([[1, 2, 3, {"a": "x", "b": "y", "c": "z"}]]) // returns "a map inside a list: [1, 2, 3, {"a":"x", "b":"y", "c":"d"}]" // "true bool: %s - false bool: %s\nbinary bool: %b".format([true, false, true]) // returns "true bool: true - false bool: false\nbinary bool: 1" // -// Passing an incorrect type (an integer to `%s`) is considered an error, as well as attempting +// Passing an incorrect type (a string to `%b`) is considered an error, as well as attempting // to use more formatting clauses than there are arguments (`%d %d %d` while passing two ints, for instance). // If compile-time checking is enabled, and the formatting string is a constant, and the argument list is a literal, // then letting any arguments go unused/unformatted is also considered an error. 
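As an aside, here is a minimal usage sketch (editor's illustration, not part of this patch) of the compile-time format validation described above; it relies on the StringsValidateFormatCalls option introduced in the next hunk and assumes the cel, ext, fmt, and log imports:

env, err := cel.NewEnv(ext.Strings(ext.StringsValidateFormatCalls(true)))
if err != nil {
	log.Fatal(err)
}
// A constant format string applied to a mismatched literal argument; with the
// validator enabled the mismatch is expected to be reported by Compile rather
// than deferred to evaluation time.
_, iss := env.Compile(`"%d".format(["not a number"])`)
if iss != nil && iss.Err() != nil {
	fmt.Println("format error:", iss.Err())
}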
@@ -205,6 +202,8 @@ const ( // 'hello hello'.replace('he', 'we', -1) // returns 'wello wello' // 'hello hello'.replace('he', 'we', 1) // returns 'wello hello' // 'hello hello'.replace('he', 'we', 0) // returns 'hello hello' +// 'hello hello'.replace('', '_') // returns '_h_e_l_l_o_ _h_e_l_l_o_' +// 'hello hello'.replace('h', '') // returns 'ello ello' // // # Split // @@ -270,8 +269,26 @@ const ( // // 'TacoCat'.upperAscii() // returns 'TACOCAT' // 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII' +// +// # Reverse +// +// Introduced at version: 3 +// +// Returns a new string whose characters are the same as the target string, only formatted in +// reverse order. +// This function relies on converting strings to rune arrays in order to reverse +// +// <string>.reverse() -> <string> +// +// Examples: +// +// 'gums'.reverse() // returns 'smug' +// 'John Smith'.reverse() // returns 'htimS nhoJ' func Strings(options ...StringsOption) cel.EnvOption { - s := &stringLib{version: math.MaxUint32} + s := &stringLib{ + version: math.MaxUint32, + validateFormat: true, + } for _, o := range options { s = o(s) } @@ -279,8 +296,9 @@ } type stringLib struct { - locale string - version uint32 + locale string + version uint32 + validateFormat bool } // LibraryName implements the SingletonLibrary interface method. @@ -317,6 +335,17 @@ func StringsVersion(version uint32) StringsOption { } } +// StringsValidateFormatCalls validates type-checked ASTs to ensure that string.format() calls have +// valid formatting clauses and valid argument types for each clause. +// +// Enabled by default. +func StringsValidateFormatCalls(value bool) StringsOption { + return func(s *stringLib) *stringLib { + s.validateFormat = value + return s + } +} + // CompileOptions implements the Library interface method.
func (lib *stringLib) CompileOptions() []cel.EnvOption { formatLocale := "en_US" @@ -440,13 +469,15 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { cel.FunctionBinding(func(args ...ref.Val) ref.Val { s := string(args[0].(types.String)) formatArgs := args[1].(traits.Lister) - return stringOrError(interpreter.ParseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale)) + return stringOrError(parseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale)) }))), cel.Function("strings.quote", cel.Overload("strings_quote", []*cel.Type{cel.StringType}, cel.StringType, cel.UnaryBinding(func(str ref.Val) ref.Val { s := str.(types.String) return stringOrError(quote(string(s))) - })))) + }))), + + cel.ASTValidators(stringFormatValidator{})) } if lib.version >= 2 { @@ -471,7 +502,7 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { cel.UnaryBinding(func(list ref.Val) ref.Val { l, err := list.ConvertToNative(stringListType) if err != nil { - return types.NewErr(err.Error()) + return types.WrapErr(err) } return stringOrError(join(l.([]string))) })), @@ -479,13 +510,26 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { cel.BinaryBinding(func(list, delim ref.Val) ref.Val { l, err := list.ConvertToNative(stringListType) if err != nil { - return types.NewErr(err.Error()) + return types.WrapErr(err) } d := delim.(types.String) return stringOrError(joinSeparator(l.([]string), string(d))) }))), ) } + if lib.version >= 3 { + opts = append(opts, + cel.Function("reverse", + cel.MemberOverload("reverse", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str ref.Val) ref.Val { + s := str.(types.String) + return stringOrError(reverse(string(s))) + }))), + ) + } + if lib.validateFormat { + opts = append(opts, cel.ASTValidators(stringFormatValidator{})) + } return opts } @@ -636,6 +680,14 @@ func upperASCII(str string) (string, error) { return string(runes), nil } +func reverse(str string) (string, error) { + chars := []rune(str) + for i, j := 0, len(chars)-1; i < j; i, j = i+1, j-1 { + chars[i], chars[j] = chars[j], chars[i] + } + return string(chars), nil +} + func joinSeparator(strs []string, separator string) (string, error) { return strings.Join(strs, separator), nil } @@ -661,238 +713,6 @@ func joinValSeparator(strs traits.Lister, separator string) (string, error) { return sb.String(), nil } -type clauseImpl func(ref.Val, string) (string, error) - -func clauseForType(argType ref.Type) (clauseImpl, error) { - switch argType { - case types.IntType, types.UintType: - return formatDecimal, nil - case types.StringType, types.BytesType, types.BoolType, types.NullType, types.TypeType: - return FormatString, nil - case types.TimestampType, types.DurationType: - // special case to ensure timestamps/durations get printed as CEL literals - return func(arg ref.Val, locale string) (string, error) { - argStrVal := arg.ConvertToType(types.StringType) - argStr := argStrVal.Value().(string) - if arg.Type() == types.TimestampType { - return fmt.Sprintf("timestamp(%q)", argStr), nil - } - if arg.Type() == types.DurationType { - return fmt.Sprintf("duration(%q)", argStr), nil - } - return "", fmt.Errorf("cannot convert argument of type %s to timestamp/duration", arg.Type().TypeName()) - }, nil - case types.ListType: - return formatList, nil - case types.MapType: - return formatMap, nil - case types.DoubleType: - // avoid formatFixed so we can output a period as the decimal separator in order - // to always be a valid CEL literal - return 
func(arg ref.Val, locale string) (string, error) { - argDouble, ok := arg.Value().(float64) - if !ok { - return "", fmt.Errorf("couldn't convert %s to float64", arg.Type().TypeName()) - } - fmtStr := fmt.Sprintf("%%.%df", defaultPrecision) - return fmt.Sprintf(fmtStr, argDouble), nil - }, nil - case types.TypeType: - return func(arg ref.Val, locale string) (string, error) { - return fmt.Sprintf("type(%s)", arg.Value().(string)), nil - }, nil - default: - return nil, fmt.Errorf("no formatting function for %s", argType.TypeName()) - } -} - -func formatList(arg ref.Val, locale string) (string, error) { - argList := arg.(traits.Lister) - argIterator := argList.Iterator() - var listStrBuilder strings.Builder - _, err := listStrBuilder.WriteRune('[') - if err != nil { - return "", fmt.Errorf("error writing to list string: %w", err) - } - for argIterator.HasNext() == types.True { - member := argIterator.Next() - memberFormat, err := clauseForType(member.Type()) - if err != nil { - return "", err - } - unquotedStr, err := memberFormat(member, locale) - if err != nil { - return "", err - } - str := quoteForCEL(member, unquotedStr) - _, err = listStrBuilder.WriteString(str) - if err != nil { - return "", fmt.Errorf("error writing to list string: %w", err) - } - if argIterator.HasNext() == types.True { - _, err = listStrBuilder.WriteString(", ") - if err != nil { - return "", fmt.Errorf("error writing to list string: %w", err) - } - } - } - _, err = listStrBuilder.WriteRune(']') - if err != nil { - return "", fmt.Errorf("error writing to list string: %w", err) - } - return listStrBuilder.String(), nil -} - -func formatMap(arg ref.Val, locale string) (string, error) { - argMap := arg.(traits.Mapper) - argIterator := argMap.Iterator() - type mapPair struct { - key string - value string - } - argPairs := make([]mapPair, argMap.Size().Value().(int64)) - i := 0 - for argIterator.HasNext() == types.True { - key := argIterator.Next() - var keyFormat clauseImpl - switch key.Type() { - case types.StringType, types.BoolType: - keyFormat = FormatString - case types.IntType, types.UintType: - keyFormat = formatDecimal - default: - return "", fmt.Errorf("no formatting function for map key of type %s", key.Type().TypeName()) - } - unquotedKeyStr, err := keyFormat(key, locale) - if err != nil { - return "", err - } - keyStr := quoteForCEL(key, unquotedKeyStr) - value, found := argMap.Find(key) - if !found { - return "", fmt.Errorf("could not find key: %q", key) - } - valueFormat, err := clauseForType(value.Type()) - if err != nil { - return "", err - } - unquotedValueStr, err := valueFormat(value, locale) - if err != nil { - return "", err - } - valueStr := quoteForCEL(value, unquotedValueStr) - argPairs[i] = mapPair{keyStr, valueStr} - i++ - } - sort.SliceStable(argPairs, func(x, y int) bool { - return argPairs[x].key < argPairs[y].key - }) - var mapStrBuilder strings.Builder - _, err := mapStrBuilder.WriteRune('{') - if err != nil { - return "", fmt.Errorf("error writing to map string: %w", err) - } - for i, entry := range argPairs { - _, err = mapStrBuilder.WriteString(fmt.Sprintf("%s:%s", entry.key, entry.value)) - if err != nil { - return "", fmt.Errorf("error writing to map string: %w", err) - } - if i < len(argPairs)-1 { - _, err = mapStrBuilder.WriteString(", ") - if err != nil { - return "", fmt.Errorf("error writing to map string: %w", err) - } - } - } - _, err = mapStrBuilder.WriteRune('}') - if err != nil { - return "", fmt.Errorf("error writing to map string: %w", err) - } - return 
mapStrBuilder.String(), nil -} - -// quoteForCEL takes a formatted, unquoted value and quotes it in a manner -// suitable for embedding directly in CEL. -func quoteForCEL(refVal ref.Val, unquotedValue string) string { - switch refVal.Type() { - case types.StringType: - return fmt.Sprintf("%q", unquotedValue) - case types.BytesType: - return fmt.Sprintf("b%q", unquotedValue) - case types.DoubleType: - // special case to handle infinity/NaN - num := refVal.Value().(float64) - if math.IsInf(num, 1) || math.IsInf(num, -1) || math.IsNaN(num) { - return fmt.Sprintf("%q", unquotedValue) - } - return unquotedValue - default: - return unquotedValue - } -} - -// FormatString returns the string representation of a CEL value. -// It is used to implement the %s specifier in the (string).format() extension -// function. -func FormatString(arg ref.Val, locale string) (string, error) { - switch arg.Type() { - case types.ListType: - return formatList(arg, locale) - case types.MapType: - return formatMap(arg, locale) - case types.IntType, types.UintType, types.DoubleType, - types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType: - argStrVal := arg.ConvertToType(types.StringType) - argStr, ok := argStrVal.Value().(string) - if !ok { - return "", fmt.Errorf("could not convert argument %q to string", argStrVal) - } - return argStr, nil - case types.NullType: - return "null", nil - default: - return "", fmt.Errorf("string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps, was given %s", arg.Type().TypeName()) - } -} - -func formatDecimal(arg ref.Val, locale string) (string, error) { - switch arg.Type() { - case types.IntType: - argInt, ok := arg.ConvertToType(types.IntType).Value().(int64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) - } - return fmt.Sprintf("%d", argInt), nil - case types.UintType: - argInt, ok := arg.ConvertToType(types.UintType).Value().(uint64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) - } - return fmt.Sprintf("%d", argInt), nil - default: - return "", fmt.Errorf("decimal clause can only be used on integers, was given %s", arg.Type().TypeName()) - } -} - -func matchLanguage(locale string) (language.Tag, error) { - matcher, err := makeMatcher(locale) - if err != nil { - return language.Und, err - } - tag, _ := language.MatchStrings(matcher, locale) - return tag, nil -} - -func makeMatcher(locale string) (language.Matcher, error) { - tags := make([]language.Tag, 0) - tag, err := language.Parse(locale) - if err != nil { - return nil, err - } - tags = append(tags, tag) - return language.NewMatcher(tags), nil -} - // quote implements a string quoting function. The string will be wrapped in // double quotes, and all valid CEL escape sequences will be escaped to show up // literally if printed. 
If the input contains any invalid UTF-8, the invalid runes @@ -940,156 +760,6 @@ func sanitize(s string) string { return sanitizedStringBuilder.String() } -type stringFormatter struct{} - -func (c *stringFormatter) String(arg ref.Val, locale string) (string, error) { - return FormatString(arg, locale) -} - -func (c *stringFormatter) Decimal(arg ref.Val, locale string) (string, error) { - return formatDecimal(arg, locale) -} - -func (c *stringFormatter) Fixed(precision *int) func(ref.Val, string) (string, error) { - if precision == nil { - precision = new(int) - *precision = defaultPrecision - } - return func(arg ref.Val, locale string) (string, error) { - strException := false - if arg.Type() == types.StringType { - argStr := arg.Value().(string) - if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { - strException = true - } - } - if arg.Type() != types.DoubleType && !strException { - return "", fmt.Errorf("fixed-point clause can only be used on doubles, was given %s", arg.Type().TypeName()) - } - argFloatVal := arg.ConvertToType(types.DoubleType) - argFloat, ok := argFloatVal.Value().(float64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value()) - } - fmtStr := fmt.Sprintf("%%.%df", *precision) - - matchedLocale, err := matchLanguage(locale) - if err != nil { - return "", fmt.Errorf("error matching locale: %w", err) - } - return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil - } -} - -func (c *stringFormatter) Scientific(precision *int) func(ref.Val, string) (string, error) { - if precision == nil { - precision = new(int) - *precision = defaultPrecision - } - return func(arg ref.Val, locale string) (string, error) { - strException := false - if arg.Type() == types.StringType { - argStr := arg.Value().(string) - if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { - strException = true - } - } - if arg.Type() != types.DoubleType && !strException { - return "", fmt.Errorf("scientific clause can only be used on doubles, was given %s", arg.Type().TypeName()) - } - argFloatVal := arg.ConvertToType(types.DoubleType) - argFloat, ok := argFloatVal.Value().(float64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value()) - } - matchedLocale, err := matchLanguage(locale) - if err != nil { - return "", fmt.Errorf("error matching locale: %w", err) - } - fmtStr := fmt.Sprintf("%%%de", *precision) - return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil - } -} - -func (c *stringFormatter) Binary(arg ref.Val, locale string) (string, error) { - switch arg.Type() { - case types.IntType: - argInt := arg.Value().(int64) - // locale is intentionally unused as integers formatted as binary - // strings are locale-independent - return fmt.Sprintf("%b", argInt), nil - case types.UintType: - argInt := arg.Value().(uint64) - return fmt.Sprintf("%b", argInt), nil - case types.BoolType: - argBool := arg.Value().(bool) - if argBool { - return "1", nil - } - return "0", nil - default: - return "", fmt.Errorf("only integers and bools can be formatted as binary, was given %s", arg.Type().TypeName()) - } -} - -func (c *stringFormatter) Hex(useUpper bool) func(ref.Val, string) (string, error) { - return func(arg ref.Val, locale string) (string, error) { - fmtStr := "%x" - if useUpper { - fmtStr = "%X" - } - switch arg.Type() { - case types.StringType, types.BytesType: - if arg.Type() == types.BytesType { - return fmt.Sprintf(fmtStr, arg.Value().([]byte)), nil - } - return 
fmt.Sprintf(fmtStr, arg.Value().(string)), nil - case types.IntType: - argInt, ok := arg.Value().(int64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) - } - return fmt.Sprintf(fmtStr, argInt), nil - case types.UintType: - argInt, ok := arg.Value().(uint64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) - } - return fmt.Sprintf(fmtStr, argInt), nil - default: - return "", fmt.Errorf("only integers, byte buffers, and strings can be formatted as hex, was given %s", arg.Type().TypeName()) - } - } -} - -func (c *stringFormatter) Octal(arg ref.Val, locale string) (string, error) { - switch arg.Type() { - case types.IntType: - argInt := arg.Value().(int64) - return fmt.Sprintf("%o", argInt), nil - case types.UintType: - argInt := arg.Value().(uint64) - return fmt.Sprintf("%o", argInt), nil - default: - return "", fmt.Errorf("octal clause can only be used on integers, was given %s", arg.Type().TypeName()) - } -} - -type stringArgList struct { - args traits.Lister -} - -func (c *stringArgList) Arg(index int64) (ref.Val, error) { - if index >= c.args.Size().Value().(int64) { - return nil, fmt.Errorf("index %d out of range", index) - } - return c.args.Get(types.Int(index)), nil -} - -func (c *stringArgList) ArgSize() int64 { - return c.args.Size().Value().(int64) -} - var ( stringListType = reflect.TypeOf([]string{}) ) diff --git a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel index 3a5219eb5f..220e23d475 100644 --- a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel +++ b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel @@ -14,7 +14,6 @@ go_library( "decorators.go", "dispatcher.go", "evalstate.go", - "formatting.go", "interpretable.go", "interpreter.go", "optimizations.go", diff --git a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go index 1fbaaf17e2..8f19bde7e1 100644 --- a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go +++ b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go @@ -178,10 +178,8 @@ func numericValueEquals(value any, celValue ref.Val) bool { // NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing // AttributePattern matches with PartialActivation inputs. -func NewPartialAttributeFactory(container *containers.Container, - adapter types.Adapter, - provider types.Provider) AttributeFactory { - fac := NewAttributeFactory(container, adapter, provider) +func NewPartialAttributeFactory(container *containers.Container, adapter types.Adapter, provider types.Provider, opts ...AttrFactoryOption) AttributeFactory { + fac := NewAttributeFactory(container, adapter, provider, opts...) return &partialAttributeFactory{ AttributeFactory: fac, container: container, diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go index ca97bdfcf1..b1b3aacc83 100644 --- a/vendor/github.com/google/cel-go/interpreter/attributes.go +++ b/vendor/github.com/google/cel-go/interpreter/attributes.go @@ -126,21 +126,39 @@ type NamespacedAttribute interface { Qualifiers() []Qualifier } +// AttrFactoryOption specifies a functional option for configuring an attribute factory. 
+type AttrFactoryOption func(*attrFactory) *attrFactory + +// EnableErrorOnBadPresenceTest error generation when a presence test or optional field selection +// is performed on a primitive type. +func EnableErrorOnBadPresenceTest(value bool) AttrFactoryOption { + return func(fac *attrFactory) *attrFactory { + fac.errorOnBadPresenceTest = value + return fac + } +} + // NewAttributeFactory returns a default AttributeFactory which is produces Attribute values // capable of resolving types by simple names and qualify the values using the supported qualifier // types: bool, int, string, and uint. -func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider) AttributeFactory { - return &attrFactory{ +func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider, opts ...AttrFactoryOption) AttributeFactory { + fac := &attrFactory{ container: cont, adapter: a, provider: p, } + for _, o := range opts { + fac = o(fac) + } + return fac } type attrFactory struct { container *containers.Container adapter types.Adapter provider types.Provider + + errorOnBadPresenceTest bool } // AbsoluteAttribute refers to a variable value and an optional qualifier path. @@ -149,12 +167,13 @@ type attrFactory struct { // resolution rules. func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute { return &absoluteAttribute{ - id: id, - namespaceNames: names, - qualifiers: []Qualifier{}, - adapter: r.adapter, - provider: r.provider, - fac: r, + id: id, + namespaceNames: names, + qualifiers: []Qualifier{}, + adapter: r.adapter, + provider: r.provider, + fac: r, + errorOnBadPresenceTest: r.errorOnBadPresenceTest, } } @@ -188,11 +207,12 @@ func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute { // RelativeAttribute refers to an expression and an optional qualifier path. func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribute { return &relativeAttribute{ - id: id, - operand: operand, - qualifiers: []Qualifier{}, - adapter: r.adapter, - fac: r, + id: id, + operand: operand, + qualifiers: []Qualifier{}, + adapter: r.adapter, + fac: r, + errorOnBadPresenceTest: r.errorOnBadPresenceTest, } } @@ -214,7 +234,7 @@ func (r *attrFactory) NewQualifier(objType *types.Type, qualID int64, val any, o }, nil } } - return newQualifier(r.adapter, qualID, val, opt) + return newQualifier(r.adapter, qualID, val, opt, r.errorOnBadPresenceTest) } type absoluteAttribute struct { @@ -226,6 +246,8 @@ type absoluteAttribute struct { adapter types.Adapter provider types.Provider fac AttributeFactory + + errorOnBadPresenceTest bool } // ID implements the Attribute interface method. @@ -287,6 +309,9 @@ func (a *absoluteAttribute) Resolve(vars Activation) (any, error) { // determine whether the type is unknown before returning. obj, found := vars.ResolveName(nm) if found { + if celErr, ok := obj.(*types.Err); ok { + return nil, celErr.Unwrap() + } obj, isOpt, err := applyQualifiers(vars, obj, a.qualifiers) if err != nil { return nil, err @@ -511,6 +536,8 @@ type relativeAttribute struct { qualifiers []Qualifier adapter types.Adapter fac AttributeFactory + + errorOnBadPresenceTest bool } // ID is an implementation of the Attribute interface method. 
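For context, a minimal sketch (editor's illustration, not part of this patch) of wiring the new option into an attribute factory; it assumes the common/containers, common/types, interpreter, and log imports, and uses types.NewRegistry and containers.DefaultContainer from cel-go's public API:

reg, err := types.NewRegistry()
if err != nil {
	log.Fatal(err)
}
// Request an error, rather than a silent false, when a presence test is
// applied to a primitive value.
fac := interpreter.NewAttributeFactory(
	containers.DefaultContainer, reg, reg,
	interpreter.EnableErrorOnBadPresenceTest(true),
)
attr := fac.AbsoluteAttribute(1, "request")
_ = attr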
@@ -574,7 +601,7 @@ func (a *relativeAttribute) String() string { return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand) } -func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, error) { +func newQualifier(adapter types.Adapter, id int64, v any, opt, errorOnBadPresenceTest bool) (Qualifier, error) { var qual Qualifier switch val := v.(type) { case Attribute: @@ -589,71 +616,138 @@ func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, }, nil case string: qual = &stringQualifier{ - id: id, - value: val, - celValue: types.String(val), - adapter: adapter, - optional: opt, + id: id, + value: val, + celValue: types.String(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case int: qual = &intQualifier{ - id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt, + id: id, + value: int64(val), + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case int32: qual = &intQualifier{ - id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt, + id: id, + value: int64(val), + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case int64: qual = &intQualifier{ - id: id, value: val, celValue: types.Int(val), adapter: adapter, optional: opt, + id: id, + value: val, + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case uint: qual = &uintQualifier{ - id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt, + id: id, + value: uint64(val), + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case uint32: qual = &uintQualifier{ - id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt, + id: id, + value: uint64(val), + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case uint64: qual = &uintQualifier{ - id: id, value: val, celValue: types.Uint(val), adapter: adapter, optional: opt, + id: id, + value: val, + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case bool: qual = &boolQualifier{ - id: id, value: val, celValue: types.Bool(val), adapter: adapter, optional: opt, + id: id, + value: val, + celValue: types.Bool(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case float32: qual = &doubleQualifier{ - id: id, - value: float64(val), - celValue: types.Double(val), - adapter: adapter, - optional: opt, + id: id, + value: float64(val), + celValue: types.Double(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case float64: qual = &doubleQualifier{ - id: id, value: val, celValue: types.Double(val), adapter: adapter, optional: opt, + id: id, + value: val, + celValue: types.Double(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.String: qual = &stringQualifier{ - id: id, value: string(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: string(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Int: qual = &intQualifier{ - id: id, value: int64(val), celValue: val, adapter: 
adapter, optional: opt, + id: id, + value: int64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Uint: qual = &uintQualifier{ - id: id, value: uint64(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: uint64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Bool: qual = &boolQualifier{ - id: id, value: bool(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: bool(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Double: qual = &doubleQualifier{ - id: id, value: float64(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: float64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case *types.Unknown: qual = &unknownQualifier{id: id, value: val} @@ -684,11 +778,12 @@ func (q *attrQualifier) IsOptional() bool { } type stringQualifier struct { - id int64 - value string - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value string + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -771,7 +866,7 @@ func (q *stringQualifier) qualifyInternal(vars Activation, obj any, presenceTest return obj, true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -785,11 +880,12 @@ func (q *stringQualifier) Value() ref.Val { } type intQualifier struct { - id int64 - value int64 - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value int64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -895,7 +991,7 @@ func (q *intQualifier) qualifyInternal(vars Activation, obj any, presenceTest, p return o[i], true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -912,11 +1008,12 @@ func (q *intQualifier) Value() ref.Val { } type uintQualifier struct { - id int64 - value uint64 - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value uint64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -963,7 +1060,7 @@ func (q *uintQualifier) qualifyInternal(vars Activation, obj any, presenceTest, return obj, true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -977,11 +1074,12 @@ func (q *uintQualifier) Value() ref.Val { } type boolQualifier struct { - id int64 - value bool - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value bool + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. 
@@ -1014,7 +1112,7 @@ func (q *boolQualifier) qualifyInternal(vars Activation, obj any, presenceTest, return obj, true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -1089,11 +1187,12 @@ func (q *fieldQualifier) Value() ref.Val { // type may not be known ahead of time and may not conform to the standard types supported as valid // protobuf map key types. type doubleQualifier struct { - id int64 - value float64 - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value float64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -1117,7 +1216,7 @@ func (q *doubleQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnl } func (q *doubleQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } // Value implements the ConstantQualifier interface @@ -1223,7 +1322,7 @@ func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAt // refQualify attempts to convert the value to a CEL value and then uses reflection methods to try and // apply the qualifier with the option to presence test field accesses before retrieving field values. -func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) { +func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly, errorOnBadPresenceTest bool) (ref.Val, bool, error) { celVal := adapter.NativeToValue(obj) switch v := celVal.(type) { case *types.Unknown: @@ -1280,7 +1379,7 @@ func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, prese } return val, true, nil default: - if presenceTest { + if presenceTest && !errorOnBadPresenceTest { return nil, false, nil } return nil, false, missingKey(idx) diff --git a/vendor/github.com/google/cel-go/interpreter/formatting.go b/vendor/github.com/google/cel-go/interpreter/formatting.go deleted file mode 100644 index e3f7533745..0000000000 --- a/vendor/github.com/google/cel-go/interpreter/formatting.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package interpreter - -import ( - "errors" - "fmt" - "strconv" - "strings" - "unicode" - - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" -) - -type typeVerifier func(int64, ...ref.Type) (bool, error) - -// InterpolateFormattedString checks the syntax and cardinality of any string.format calls present in the expression and reports -// any errors at compile time. 
-func InterpolateFormattedString(verifier typeVerifier) InterpretableDecorator { - return func(inter Interpretable) (Interpretable, error) { - call, ok := inter.(InterpretableCall) - if !ok { - return inter, nil - } - if call.OverloadID() != "string_format" { - return inter, nil - } - args := call.Args() - if len(args) != 2 { - return nil, fmt.Errorf("wrong number of arguments to string.format (expected 2, got %d)", len(args)) - } - fmtStrInter, ok := args[0].(InterpretableConst) - if !ok { - return inter, nil - } - var fmtArgsInter InterpretableConstructor - fmtArgsInter, ok = args[1].(InterpretableConstructor) - if !ok { - return inter, nil - } - if fmtArgsInter.Type() != types.ListType { - // don't necessarily return an error since the list may be DynType - return inter, nil - } - formatStr := fmtStrInter.Value().Value().(string) - initVals := fmtArgsInter.InitVals() - - formatCheck := &formatCheck{ - args: initVals, - verifier: verifier, - } - // use a placeholder locale, since locale doesn't affect syntax - _, err := ParseFormatString(formatStr, formatCheck, formatCheck, "en_US") - if err != nil { - return nil, err - } - seenArgs := formatCheck.argsRequested - if len(initVals) > seenArgs { - return nil, fmt.Errorf("too many arguments supplied to string.format (expected %d, got %d)", seenArgs, len(initVals)) - } - return inter, nil - } -} - -type formatCheck struct { - args []Interpretable - argsRequested int - curArgIndex int64 - enableCheckArgTypes bool - verifier typeVerifier -} - -func (c *formatCheck) String(arg ref.Val, locale string) (string, error) { - valid, err := verifyString(c.args[c.curArgIndex], c.verifier) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps") - } - return "", nil -} - -func (c *formatCheck) Decimal(arg ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - valid, err := c.verifier(id, types.IntType, types.UintType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("integer clause can only be used on integers") - } - return "", nil -} - -func (c *formatCheck) Fixed(precision *int) func(ref.Val, string) (string, error) { - return func(arg ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - // we allow StringType since "NaN", "Infinity", and "-Infinity" are also valid values - valid, err := c.verifier(id, types.DoubleType, types.StringType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("fixed-point clause can only be used on doubles") - } - return "", nil - } -} - -func (c *formatCheck) Scientific(precision *int) func(ref.Val, string) (string, error) { - return func(arg ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - valid, err := c.verifier(id, types.DoubleType, types.StringType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("scientific clause can only be used on doubles") - } - return "", nil - } -} - -func (c *formatCheck) Binary(arg ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - valid, err := c.verifier(id, types.IntType, types.UintType, types.BoolType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("only integers and bools can be formatted as binary") - } - return "", nil -} - -func (c *formatCheck) Hex(useUpper bool) func(ref.Val, string) (string, error) { - return func(arg 
ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - valid, err := c.verifier(id, types.IntType, types.UintType, types.StringType, types.BytesType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("only integers, byte buffers, and strings can be formatted as hex") - } - return "", nil - } -} - -func (c *formatCheck) Octal(arg ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - valid, err := c.verifier(id, types.IntType, types.UintType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("octal clause can only be used on integers") - } - return "", nil -} - -func (c *formatCheck) Arg(index int64) (ref.Val, error) { - c.argsRequested++ - c.curArgIndex = index - // return a dummy value - this is immediately passed to back to us - // through one of the FormatCallback functions, so anything will do - return types.Int(0), nil -} - -func (c *formatCheck) ArgSize() int64 { - return int64(len(c.args)) -} - -func verifyString(sub Interpretable, verifier typeVerifier) (bool, error) { - subVerified, err := verifier(sub.ID(), - types.ListType, types.MapType, types.IntType, types.UintType, types.DoubleType, - types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType, types.NullType) - if err != nil { - return false, err - } - if !subVerified { - return false, nil - } - con, ok := sub.(InterpretableConstructor) - if ok { - members := con.InitVals() - for _, m := range members { - // recursively verify if we're dealing with a list/map - verified, err := verifyString(m, verifier) - if err != nil { - return false, err - } - if !verified { - return false, nil - } - } - } - return true, nil - -} - -// FormatStringInterpolator is an interface that allows user-defined behavior -// for formatting clause implementations, as well as argument retrieval. -// Each function is expected to support the appropriate types as laid out in -// the string.format documentation, and to return an error if given an inappropriate type. -type FormatStringInterpolator interface { - // String takes a ref.Val and a string representing the current locale identifier - // and returns the Val formatted as a string, or an error if one occurred. - String(ref.Val, string) (string, error) - - // Decimal takes a ref.Val and a string representing the current locale identifier - // and returns the Val formatted as a decimal integer, or an error if one occurred. - Decimal(ref.Val, string) (string, error) - - // Fixed takes an int pointer representing precision (or nil if none was given) and - // returns a function operating in a similar manner to String and Decimal, taking a - // ref.Val and locale and returning the appropriate string. A closure is returned - // so precision can be set without needing an additional function call/configuration. - Fixed(*int) func(ref.Val, string) (string, error) - - // Scientific functions identically to Fixed, except the string returned from the closure - // is expected to be in scientific notation. - Scientific(*int) func(ref.Val, string) (string, error) - - // Binary takes a ref.Val and a string representing the current locale identifier - // and returns the Val formatted as a binary integer, or an error if one occurred. - Binary(ref.Val, string) (string, error) - - // Hex takes a boolean that, if true, indicates the hex string output by the returned - // closure should use uppercase letters for A-F. 
- Hex(bool) func(ref.Val, string) (string, error) - - // Octal takes a ref.Val and a string representing the current locale identifier and - // returns the Val formatted in octal, or an error if one occurred. - Octal(ref.Val, string) (string, error) -} - -// FormatList is an interface that allows user-defined list-like datatypes to be used -// for formatting clause implementations. -type FormatList interface { - // Arg returns the ref.Val at the given index, or an error if one occurred. - Arg(int64) (ref.Val, error) - // ArgSize returns the length of the argument list. - ArgSize() int64 -} - -type clauseImpl func(ref.Val, string) (string, error) - -// ParseFormatString formats a string according to the string.format syntax, taking the clause implementations -// from the provided FormatCallback and the args from the given FormatList. -func ParseFormatString(formatStr string, callback FormatStringInterpolator, list FormatList, locale string) (string, error) { - i := 0 - argIndex := 0 - var builtStr strings.Builder - for i < len(formatStr) { - if formatStr[i] == '%' { - if i+1 < len(formatStr) && formatStr[i+1] == '%' { - err := builtStr.WriteByte('%') - if err != nil { - return "", fmt.Errorf("error writing format string: %w", err) - } - i += 2 - continue - } else { - argAny, err := list.Arg(int64(argIndex)) - if err != nil { - return "", err - } - if i+1 >= len(formatStr) { - return "", errors.New("unexpected end of string") - } - if int64(argIndex) >= list.ArgSize() { - return "", fmt.Errorf("index %d out of range", argIndex) - } - numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale) - if refErr != nil { - return "", refErr - } - _, err = builtStr.WriteString(val) - if err != nil { - return "", fmt.Errorf("error writing format string: %w", err) - } - i += numRead - argIndex++ - } - } else { - err := builtStr.WriteByte(formatStr[i]) - if err != nil { - return "", fmt.Errorf("error writing format string: %w", err) - } - i++ - } - } - return builtStr.String(), nil -} - -// parseAndFormatClause parses the format clause at the start of the given string with val, and returns -// how many characters were consumed and the substituted string form of val, or an error if one occurred. 
-func parseAndFormatClause(formatStr string, val ref.Val, callback FormatStringInterpolator, list FormatList, locale string) (int, string, error) { - i := 1 - read, formatter, err := parseFormattingClause(formatStr[i:], callback) - i += read - if err != nil { - return -1, "", fmt.Errorf("could not parse formatting clause: %s", err) - } - - valStr, err := formatter(val, locale) - if err != nil { - return -1, "", fmt.Errorf("error during formatting: %s", err) - } - return i, valStr, nil -} - -func parseFormattingClause(formatStr string, callback FormatStringInterpolator) (int, clauseImpl, error) { - i := 0 - read, precision, err := parsePrecision(formatStr[i:]) - i += read - if err != nil { - return -1, nil, fmt.Errorf("error while parsing precision: %w", err) - } - r := rune(formatStr[i]) - i++ - switch r { - case 's': - return i, callback.String, nil - case 'd': - return i, callback.Decimal, nil - case 'f': - return i, callback.Fixed(precision), nil - case 'e': - return i, callback.Scientific(precision), nil - case 'b': - return i, callback.Binary, nil - case 'x', 'X': - return i, callback.Hex(unicode.IsUpper(r)), nil - case 'o': - return i, callback.Octal, nil - default: - return -1, nil, fmt.Errorf("unrecognized formatting clause \"%c\"", r) - } -} - -func parsePrecision(formatStr string) (int, *int, error) { - i := 0 - if formatStr[i] != '.' { - return i, nil, nil - } - i++ - var buffer strings.Builder - for { - if i >= len(formatStr) { - return -1, nil, errors.New("could not find end of precision specifier") - } - if !isASCIIDigit(rune(formatStr[i])) { - break - } - buffer.WriteByte(formatStr[i]) - i++ - } - precision, err := strconv.Atoi(buffer.String()) - if err != nil { - return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err) - } - return i, &precision, nil -} - -func isASCIIDigit(r rune) bool { - return r <= unicode.MaxASCII && unicode.IsDigit(r) -} diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go index c4598dfa73..5612384074 100644 --- a/vendor/github.com/google/cel-go/interpreter/interpretable.go +++ b/vendor/github.com/google/cel-go/interpreter/interpretable.go @@ -125,7 +125,7 @@ func (test *evalTestOnly) Eval(ctx Activation) ref.Val { val, err := test.Resolve(ctx) // Return an error if the resolve step fails if err != nil { - return types.WrapErr(err) + return types.LabelErrNode(test.id, types.WrapErr(err)) } if optVal, isOpt := val.(*types.Optional); isOpt { return types.Bool(optVal.HasValue()) @@ -231,6 +231,7 @@ func (or *evalOr) Eval(ctx Activation) ref.Val { } else { err = types.MaybeNoSuchOverloadErr(val) } + err = types.LabelErrNode(or.id, err) } } } @@ -273,6 +274,7 @@ func (and *evalAnd) Eval(ctx Activation) ref.Val { } else { err = types.MaybeNoSuchOverloadErr(val) } + err = types.LabelErrNode(and.id, err) } } } @@ -377,7 +379,7 @@ func (zero *evalZeroArity) ID() int64 { // Eval implements the Interpretable interface method. func (zero *evalZeroArity) Eval(ctx Activation) ref.Val { - return zero.impl() + return types.LabelErrNode(zero.id, zero.impl()) } // Function implements the InterpretableCall interface method. @@ -421,14 +423,14 @@ func (un *evalUnary) Eval(ctx Activation) ref.Val { // If the implementation is bound and the argument value has the right traits required to // invoke it, then call the implementation. 
if un.impl != nil && (un.trait == 0 || (!strict && types.IsUnknownOrError(argVal)) || argVal.Type().HasTrait(un.trait)) { - return un.impl(argVal) + return types.LabelErrNode(un.id, un.impl(argVal)) } // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the // operand (arg0). if argVal.Type().HasTrait(traits.ReceiverType) { - return argVal.(traits.Receiver).Receive(un.function, un.overload, []ref.Val{}) + return types.LabelErrNode(un.id, argVal.(traits.Receiver).Receive(un.function, un.overload, []ref.Val{})) } - return types.NewErr("no such overload: %s", un.function) + return types.NewErrWithNodeID(un.id, "no such overload: %s", un.function) } // Function implements the InterpretableCall interface method. @@ -479,14 +481,14 @@ func (bin *evalBinary) Eval(ctx Activation) ref.Val { // If the implementation is bound and the argument value has the right traits required to // invoke it, then call the implementation. if bin.impl != nil && (bin.trait == 0 || (!strict && types.IsUnknownOrError(lVal)) || lVal.Type().HasTrait(bin.trait)) { - return bin.impl(lVal, rVal) + return types.LabelErrNode(bin.id, bin.impl(lVal, rVal)) } // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the // operand (arg0). if lVal.Type().HasTrait(traits.ReceiverType) { - return lVal.(traits.Receiver).Receive(bin.function, bin.overload, []ref.Val{rVal}) + return types.LabelErrNode(bin.id, lVal.(traits.Receiver).Receive(bin.function, bin.overload, []ref.Val{rVal})) } - return types.NewErr("no such overload: %s", bin.function) + return types.NewErrWithNodeID(bin.id, "no such overload: %s", bin.function) } // Function implements the InterpretableCall interface method. @@ -545,14 +547,14 @@ func (fn *evalVarArgs) Eval(ctx Activation) ref.Val { // invoke it, then call the implementation. arg0 := argVals[0] if fn.impl != nil && (fn.trait == 0 || (!strict && types.IsUnknownOrError(arg0)) || arg0.Type().HasTrait(fn.trait)) { - return fn.impl(argVals...) + return types.LabelErrNode(fn.id, fn.impl(argVals...)) } // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the // operand (arg0). if arg0.Type().HasTrait(traits.ReceiverType) { - return arg0.(traits.Receiver).Receive(fn.function, fn.overload, argVals[1:]) + return types.LabelErrNode(fn.id, arg0.(traits.Receiver).Receive(fn.function, fn.overload, argVals[1:])) } - return types.NewErr("no such overload: %s", fn.function) + return types.NewErrWithNodeID(fn.id, "no such overload: %s %d", fn.function, fn.id) } // Function implements the InterpretableCall interface method. 
@@ -595,7 +597,7 @@ func (l *evalList) Eval(ctx Activation) ref.Val { if l.hasOptionals && l.optionals[i] { optVal, ok := elemVal.(*types.Optional) if !ok { - return invalidOptionalElementInit(elemVal) + return types.LabelErrNode(l.id, invalidOptionalElementInit(elemVal)) } if !optVal.HasValue() { continue @@ -645,7 +647,7 @@ func (m *evalMap) Eval(ctx Activation) ref.Val { if m.hasOptionals && m.optionals[i] { optVal, ok := valVal.(*types.Optional) if !ok { - return invalidOptionalEntryInit(keyVal, valVal) + return types.LabelErrNode(m.id, invalidOptionalEntryInit(keyVal, valVal)) } if !optVal.HasValue() { delete(entries, keyVal) @@ -705,7 +707,7 @@ func (o *evalObj) Eval(ctx Activation) ref.Val { if o.hasOptionals && o.optionals[i] { optVal, ok := val.(*types.Optional) if !ok { - return invalidOptionalEntryInit(field, val) + return types.LabelErrNode(o.id, invalidOptionalEntryInit(field, val)) } if !optVal.HasValue() { delete(fieldVals, field) @@ -715,7 +717,7 @@ func (o *evalObj) Eval(ctx Activation) ref.Val { } fieldVals[field] = val } - return o.provider.NewValue(o.typeName, fieldVals) + return types.LabelErrNode(o.id, o.provider.NewValue(o.typeName, fieldVals)) } func (o *evalObj) InitVals() []Interpretable { @@ -921,7 +923,7 @@ func (e *evalWatchConstQual) Qualify(vars Activation, obj any) (any, error) { out, err := e.ConstantQualifier.Qualify(vars, obj) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else { val = e.adapter.NativeToValue(out) } @@ -934,7 +936,7 @@ func (e *evalWatchConstQual) QualifyIfPresent(vars Activation, obj any, presence out, present, err := e.ConstantQualifier.QualifyIfPresent(vars, obj, presenceOnly) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else if out != nil { val = e.adapter.NativeToValue(out) } else if presenceOnly { @@ -964,7 +966,7 @@ func (e *evalWatchAttrQual) Qualify(vars Activation, obj any) (any, error) { out, err := e.Attribute.Qualify(vars, obj) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else { val = e.adapter.NativeToValue(out) } @@ -977,7 +979,7 @@ func (e *evalWatchAttrQual) QualifyIfPresent(vars Activation, obj any, presenceO out, present, err := e.Attribute.QualifyIfPresent(vars, obj, presenceOnly) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else if out != nil { val = e.adapter.NativeToValue(out) } else if presenceOnly { @@ -1001,7 +1003,7 @@ func (e *evalWatchQual) Qualify(vars Activation, obj any) (any, error) { out, err := e.Qualifier.Qualify(vars, obj) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else { val = e.adapter.NativeToValue(out) } @@ -1014,7 +1016,7 @@ func (e *evalWatchQual) QualifyIfPresent(vars Activation, obj any, presenceOnly out, present, err := e.Qualifier.QualifyIfPresent(vars, obj, presenceOnly) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else if out != nil { val = e.adapter.NativeToValue(out) } else if presenceOnly { @@ -1157,12 +1159,12 @@ func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val { } if cBool { if tErr != nil { - return types.WrapErr(tErr) + return types.LabelErrNode(cond.id, types.WrapErr(tErr)) } return cond.adapter.NativeToValue(tVal) } if fErr != nil { - return 
types.WrapErr(fErr) + return types.LabelErrNode(cond.id, types.WrapErr(fErr)) } return cond.adapter.NativeToValue(fVal) } @@ -1202,7 +1204,7 @@ func (a *evalAttr) Adapter() types.Adapter { func (a *evalAttr) Eval(ctx Activation) ref.Val { v, err := a.attr.Resolve(ctx) if err != nil { - return types.WrapErr(err) + return types.LabelErrNode(a.ID(), types.WrapErr(err)) } return a.adapter.NativeToValue(v) } diff --git a/vendor/github.com/google/cel-go/interpreter/interpreter.go b/vendor/github.com/google/cel-go/interpreter/interpreter.go index 00fc74732c..0aca74d88b 100644 --- a/vendor/github.com/google/cel-go/interpreter/interpreter.go +++ b/vendor/github.com/google/cel-go/interpreter/interpreter.go @@ -22,19 +22,13 @@ import ( "github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // Interpreter generates a new Interpretable from a checked or unchecked expression. type Interpreter interface { // NewInterpretable creates an Interpretable from a checked expression and an // optional list of InterpretableDecorator values. - NewInterpretable(checked *ast.CheckedAST, decorators ...InterpretableDecorator) (Interpretable, error) - - // NewUncheckedInterpretable returns an Interpretable from a parsed expression - // and an optional list of InterpretableDecorator values. - NewUncheckedInterpretable(expr *exprpb.Expr, decorators ...InterpretableDecorator) (Interpretable, error) + NewInterpretable(exprAST *ast.AST, decorators ...InterpretableDecorator) (Interpretable, error) } // EvalObserver is a functional interface that accepts an expression id and an observed value. @@ -177,7 +171,7 @@ func NewInterpreter(dispatcher Dispatcher, // NewIntepretable implements the Interpreter interface method. func (i *exprInterpreter) NewInterpretable( - checked *ast.CheckedAST, + checked *ast.AST, decorators ...InterpretableDecorator) (Interpretable, error) { p := newPlanner( i.dispatcher, @@ -187,19 +181,5 @@ func (i *exprInterpreter) NewInterpretable( i.container, checked, decorators...) - return p.Plan(checked.Expr) -} - -// NewUncheckedIntepretable implements the Interpreter interface method. -func (i *exprInterpreter) NewUncheckedInterpretable( - expr *exprpb.Expr, - decorators ...InterpretableDecorator) (Interpretable, error) { - p := newUncheckedPlanner( - i.dispatcher, - i.provider, - i.adapter, - i.attrFactory, - i.container, - decorators...) - return p.Plan(expr) + return p.Plan(checked.Expr()) } diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go index 757cd080e5..cf371f95d5 100644 --- a/vendor/github.com/google/cel-go/interpreter/planner.go +++ b/vendor/github.com/google/cel-go/interpreter/planner.go @@ -23,15 +23,12 @@ import ( "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // interpretablePlanner creates an Interpretable evaluation plan from a proto Expr value. type interpretablePlanner interface { // Plan generates an Interpretable value (or error) from the input proto Expr. 
- Plan(expr *exprpb.Expr) (Interpretable, error) + Plan(expr ast.Expr) (Interpretable, error) } // newPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider, @@ -43,28 +40,7 @@ func newPlanner(disp Dispatcher, adapter types.Adapter, attrFactory AttributeFactory, cont *containers.Container, - checked *ast.CheckedAST, - decorators ...InterpretableDecorator) interpretablePlanner { - return &planner{ - disp: disp, - provider: provider, - adapter: adapter, - attrFactory: attrFactory, - container: cont, - refMap: checked.ReferenceMap, - typeMap: checked.TypeMap, - decorators: decorators, - } -} - -// newUncheckedPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider, -// TypeAdapter, and Container to resolve functions and types at plan time. Namespaces present in -// Select expressions are resolved lazily at evaluation time. -func newUncheckedPlanner(disp Dispatcher, - provider types.Provider, - adapter types.Adapter, - attrFactory AttributeFactory, - cont *containers.Container, + exprAST *ast.AST, decorators ...InterpretableDecorator) interpretablePlanner { return &planner{ disp: disp, @@ -72,8 +48,8 @@ func newUncheckedPlanner(disp Dispatcher, adapter: adapter, attrFactory: attrFactory, container: cont, - refMap: make(map[int64]*ast.ReferenceInfo), - typeMap: make(map[int64]*types.Type), + refMap: exprAST.ReferenceMap(), + typeMap: exprAST.TypeMap(), decorators: decorators, } } @@ -95,22 +71,24 @@ type planner struct { // useful for layering functionality into the evaluation that is not natively understood by CEL, // such as state-tracking, expression re-write, and possibly efficient thread-safe memoization of // repeated expressions. -func (p *planner) Plan(expr *exprpb.Expr) (Interpretable, error) { - switch expr.GetExprKind().(type) { - case *exprpb.Expr_CallExpr: +func (p *planner) Plan(expr ast.Expr) (Interpretable, error) { + switch expr.Kind() { + case ast.CallKind: return p.decorate(p.planCall(expr)) - case *exprpb.Expr_IdentExpr: + case ast.IdentKind: return p.decorate(p.planIdent(expr)) - case *exprpb.Expr_SelectExpr: + case ast.LiteralKind: + return p.decorate(p.planConst(expr)) + case ast.SelectKind: return p.decorate(p.planSelect(expr)) - case *exprpb.Expr_ListExpr: + case ast.ListKind: return p.decorate(p.planCreateList(expr)) - case *exprpb.Expr_StructExpr: + case ast.MapKind: + return p.decorate(p.planCreateMap(expr)) + case ast.StructKind: return p.decorate(p.planCreateStruct(expr)) - case *exprpb.Expr_ComprehensionExpr: + case ast.ComprehensionKind: return p.decorate(p.planComprehension(expr)) - case *exprpb.Expr_ConstExpr: - return p.decorate(p.planConst(expr)) } return nil, fmt.Errorf("unsupported expr: %v", expr) } @@ -132,16 +110,16 @@ func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) { } // planIdent creates an Interpretable that resolves an identifier from an Activation. -func (p *planner) planIdent(expr *exprpb.Expr) (Interpretable, error) { +func (p *planner) planIdent(expr ast.Expr) (Interpretable, error) { // Establish whether the identifier is in the reference map. - if identRef, found := p.refMap[expr.GetId()]; found { - return p.planCheckedIdent(expr.GetId(), identRef) + if identRef, found := p.refMap[expr.ID()]; found { + return p.planCheckedIdent(expr.ID(), identRef) } // Create the possible attribute list for the unresolved reference. 
- ident := expr.GetIdentExpr() + ident := expr.AsIdent() return &evalAttr{ adapter: p.adapter, - attr: p.attrFactory.MaybeAttribute(expr.GetId(), ident.Name), + attr: p.attrFactory.MaybeAttribute(expr.ID(), ident), }, nil } @@ -174,20 +152,20 @@ func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Inter // a) selects a field from a map or proto. // b) creates a field presence test for a select within a has() macro. // c) resolves the select expression to a namespaced identifier. -func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) { +func (p *planner) planSelect(expr ast.Expr) (Interpretable, error) { // If the Select id appears in the reference map from the CheckedExpr proto then it is either // a namespaced identifier or enum value. - if identRef, found := p.refMap[expr.GetId()]; found { - return p.planCheckedIdent(expr.GetId(), identRef) + if identRef, found := p.refMap[expr.ID()]; found { + return p.planCheckedIdent(expr.ID(), identRef) } - sel := expr.GetSelectExpr() + sel := expr.AsSelect() // Plan the operand evaluation. - op, err := p.Plan(sel.GetOperand()) + op, err := p.Plan(sel.Operand()) if err != nil { return nil, err } - opType := p.typeMap[sel.GetOperand().GetId()] + opType := p.typeMap[sel.Operand().ID()] // If the Select was marked TestOnly, this is a presence test. // @@ -211,14 +189,14 @@ func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) { } // Build a qualifier for the attribute. - qual, err := p.attrFactory.NewQualifier(opType, expr.GetId(), sel.GetField(), false) + qual, err := p.attrFactory.NewQualifier(opType, expr.ID(), sel.FieldName(), false) if err != nil { return nil, err } // Modify the attribute to be test-only. - if sel.GetTestOnly() { + if sel.IsTestOnly() { attr = &evalTestOnly{ - id: expr.GetId(), + id: expr.ID(), InterpretableAttribute: attr, } } @@ -230,10 +208,10 @@ func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) { // planCall creates a callable Interpretable while specializing for common functions and invocation // patterns. Specifically, conditional operators &&, ||, ?:, and (in)equality functions result in // optimized Interpretable values. -func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) { - call := expr.GetCallExpr() +func (p *planner) planCall(expr ast.Expr) (Interpretable, error) { + call := expr.AsCall() target, fnName, oName := p.resolveFunction(expr) - argCount := len(call.GetArgs()) + argCount := len(call.Args()) var offset int if target != nil { argCount++ @@ -248,7 +226,7 @@ func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) { } args[0] = arg } - for i, argExpr := range call.GetArgs() { + for i, argExpr := range call.Args() { arg, err := p.Plan(argExpr) if err != nil { return nil, err @@ -307,7 +285,7 @@ func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) { } // planCallZero generates a zero-arity callable Interpretable. -func (p *planner) planCallZero(expr *exprpb.Expr, +func (p *planner) planCallZero(expr ast.Expr, function string, overload string, impl *functions.Overload) (Interpretable, error) { @@ -315,7 +293,7 @@ func (p *planner) planCallZero(expr *exprpb.Expr, return nil, fmt.Errorf("no such overload: %s()", function) } return &evalZeroArity{ - id: expr.GetId(), + id: expr.ID(), function: function, overload: overload, impl: impl.Function, @@ -323,7 +301,7 @@ func (p *planner) planCallZero(expr *exprpb.Expr, } // planCallUnary generates a unary callable Interpretable. 
-func (p *planner) planCallUnary(expr *exprpb.Expr, +func (p *planner) planCallUnary(expr ast.Expr, function string, overload string, impl *functions.Overload, @@ -340,7 +318,7 @@ func (p *planner) planCallUnary(expr *exprpb.Expr, nonStrict = impl.NonStrict } return &evalUnary{ - id: expr.GetId(), + id: expr.ID(), function: function, overload: overload, arg: args[0], @@ -351,7 +329,7 @@ func (p *planner) planCallUnary(expr *exprpb.Expr, } // planCallBinary generates a binary callable Interpretable. -func (p *planner) planCallBinary(expr *exprpb.Expr, +func (p *planner) planCallBinary(expr ast.Expr, function string, overload string, impl *functions.Overload, @@ -368,7 +346,7 @@ func (p *planner) planCallBinary(expr *exprpb.Expr, nonStrict = impl.NonStrict } return &evalBinary{ - id: expr.GetId(), + id: expr.ID(), function: function, overload: overload, lhs: args[0], @@ -380,7 +358,7 @@ func (p *planner) planCallBinary(expr *exprpb.Expr, } // planCallVarArgs generates a variable argument callable Interpretable. -func (p *planner) planCallVarArgs(expr *exprpb.Expr, +func (p *planner) planCallVarArgs(expr ast.Expr, function string, overload string, impl *functions.Overload, @@ -397,7 +375,7 @@ func (p *planner) planCallVarArgs(expr *exprpb.Expr, nonStrict = impl.NonStrict } return &evalVarArgs{ - id: expr.GetId(), + id: expr.ID(), function: function, overload: overload, args: args, @@ -408,41 +386,41 @@ func (p *planner) planCallVarArgs(expr *exprpb.Expr, } // planCallEqual generates an equals (==) Interpretable. -func (p *planner) planCallEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { +func (p *planner) planCallEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalEq{ - id: expr.GetId(), + id: expr.ID(), lhs: args[0], rhs: args[1], }, nil } // planCallNotEqual generates a not equals (!=) Interpretable. -func (p *planner) planCallNotEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { +func (p *planner) planCallNotEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalNe{ - id: expr.GetId(), + id: expr.ID(), lhs: args[0], rhs: args[1], }, nil } // planCallLogicalAnd generates a logical and (&&) Interpretable. -func (p *planner) planCallLogicalAnd(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { +func (p *planner) planCallLogicalAnd(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalAnd{ - id: expr.GetId(), + id: expr.ID(), terms: args, }, nil } // planCallLogicalOr generates a logical or (||) Interpretable. -func (p *planner) planCallLogicalOr(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { +func (p *planner) planCallLogicalOr(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalOr{ - id: expr.GetId(), + id: expr.ID(), terms: args, }, nil } // planCallConditional generates a conditional / ternary (c ? t : f) Interpretable. 
-func (p *planner) planCallConditional(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { +func (p *planner) planCallConditional(expr ast.Expr, args []Interpretable) (Interpretable, error) { cond := args[0] t := args[1] var tAttr Attribute @@ -464,13 +442,13 @@ func (p *planner) planCallConditional(expr *exprpb.Expr, args []Interpretable) ( return &evalAttr{ adapter: p.adapter, - attr: p.attrFactory.ConditionalAttribute(expr.GetId(), cond, tAttr, fAttr), + attr: p.attrFactory.ConditionalAttribute(expr.ID(), cond, tAttr, fAttr), }, nil } // planCallIndex either extends an attribute with the argument to the index operation, or creates // a relative attribute based on the return of a function call or operation. -func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optional bool) (Interpretable, error) { +func (p *planner) planCallIndex(expr ast.Expr, args []Interpretable, optional bool) (Interpretable, error) { op := args[0] ind := args[1] opType := p.typeMap[op.ID()] @@ -489,11 +467,11 @@ func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optiona var qual Qualifier switch ind := ind.(type) { case InterpretableConst: - qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind.Value(), optional) + qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind.Value(), optional) case InterpretableAttribute: - qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind, optional) + qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind, optional) default: - qual, err = p.relativeAttr(expr.GetId(), ind, optional) + qual, err = p.relativeAttr(expr.ID(), ind, optional) } if err != nil { return nil, err @@ -505,10 +483,10 @@ func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optiona } // planCreateList generates a list construction Interpretable. -func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) { - list := expr.GetListExpr() - optionalIndices := list.GetOptionalIndices() - elements := list.GetElements() +func (p *planner) planCreateList(expr ast.Expr) (Interpretable, error) { + list := expr.AsList() + optionalIndices := list.OptionalIndices() + elements := list.Elements() optionals := make([]bool, len(elements)) for _, index := range optionalIndices { if index < 0 || index >= int32(len(elements)) { @@ -525,7 +503,7 @@ func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) { elems[i] = elemVal } return &evalList{ - id: expr.GetId(), + id: expr.ID(), elems: elems, optionals: optionals, hasOptionals: len(optionals) != 0, @@ -534,31 +512,29 @@ func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) { } // planCreateStruct generates a map or object construction Interpretable. 
-func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) { - str := expr.GetStructExpr() - if len(str.MessageName) != 0 { - return p.planCreateObj(expr) - } - entries := str.GetEntries() +func (p *planner) planCreateMap(expr ast.Expr) (Interpretable, error) { + m := expr.AsMap() + entries := m.Entries() optionals := make([]bool, len(entries)) keys := make([]Interpretable, len(entries)) vals := make([]Interpretable, len(entries)) - for i, entry := range entries { - keyVal, err := p.Plan(entry.GetMapKey()) + for i, e := range entries { + entry := e.AsMapEntry() + keyVal, err := p.Plan(entry.Key()) if err != nil { return nil, err } keys[i] = keyVal - valVal, err := p.Plan(entry.GetValue()) + valVal, err := p.Plan(entry.Value()) if err != nil { return nil, err } vals[i] = valVal - optionals[i] = entry.GetOptionalEntry() + optionals[i] = entry.IsOptional() } return &evalMap{ - id: expr.GetId(), + id: expr.ID(), keys: keys, vals: vals, optionals: optionals, @@ -568,27 +544,28 @@ func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) { } // planCreateObj generates an object construction Interpretable. -func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) { - obj := expr.GetStructExpr() - typeName, defined := p.resolveTypeName(obj.GetMessageName()) +func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) { + obj := expr.AsStruct() + typeName, defined := p.resolveTypeName(obj.TypeName()) if !defined { - return nil, fmt.Errorf("unknown type: %s", obj.GetMessageName()) - } - entries := obj.GetEntries() - optionals := make([]bool, len(entries)) - fields := make([]string, len(entries)) - vals := make([]Interpretable, len(entries)) - for i, entry := range entries { - fields[i] = entry.GetFieldKey() - val, err := p.Plan(entry.GetValue()) + return nil, fmt.Errorf("unknown type: %s", obj.TypeName()) + } + objFields := obj.Fields() + optionals := make([]bool, len(objFields)) + fields := make([]string, len(objFields)) + vals := make([]Interpretable, len(objFields)) + for i, f := range objFields { + field := f.AsStructField() + fields[i] = field.Name() + val, err := p.Plan(field.Value()) if err != nil { return nil, err } vals[i] = val - optionals[i] = entry.GetOptionalEntry() + optionals[i] = field.IsOptional() } return &evalObj{ - id: expr.GetId(), + id: expr.ID(), typeName: typeName, fields: fields, vals: vals, @@ -599,33 +576,33 @@ func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) { } // planComprehension generates an Interpretable fold operation. 
-func (p *planner) planComprehension(expr *exprpb.Expr) (Interpretable, error) { - fold := expr.GetComprehensionExpr() - accu, err := p.Plan(fold.GetAccuInit()) +func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) { + fold := expr.AsComprehension() + accu, err := p.Plan(fold.AccuInit()) if err != nil { return nil, err } - iterRange, err := p.Plan(fold.GetIterRange()) + iterRange, err := p.Plan(fold.IterRange()) if err != nil { return nil, err } - cond, err := p.Plan(fold.GetLoopCondition()) + cond, err := p.Plan(fold.LoopCondition()) if err != nil { return nil, err } - step, err := p.Plan(fold.GetLoopStep()) + step, err := p.Plan(fold.LoopStep()) if err != nil { return nil, err } - result, err := p.Plan(fold.GetResult()) + result, err := p.Plan(fold.Result()) if err != nil { return nil, err } return &evalFold{ - id: expr.GetId(), - accuVar: fold.AccuVar, + id: expr.ID(), + accuVar: fold.AccuVar(), accu: accu, - iterVar: fold.IterVar, + iterVar: fold.IterVar(), iterRange: iterRange, cond: cond, step: step, @@ -635,37 +612,8 @@ func (p *planner) planComprehension(expr *exprpb.Expr) (Interpretable, error) { } // planConst generates a constant valued Interpretable. -func (p *planner) planConst(expr *exprpb.Expr) (Interpretable, error) { - val, err := p.constValue(expr.GetConstExpr()) - if err != nil { - return nil, err - } - return NewConstValue(expr.GetId(), val), nil -} - -// constValue converts a proto Constant value to a ref.Val. -func (p *planner) constValue(c *exprpb.Constant) (ref.Val, error) { - switch c.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - return p.adapter.NativeToValue(c.GetBoolValue()), nil - case *exprpb.Constant_BytesValue: - return p.adapter.NativeToValue(c.GetBytesValue()), nil - case *exprpb.Constant_DoubleValue: - return p.adapter.NativeToValue(c.GetDoubleValue()), nil - case *exprpb.Constant_DurationValue: - return p.adapter.NativeToValue(c.GetDurationValue().AsDuration()), nil - case *exprpb.Constant_Int64Value: - return p.adapter.NativeToValue(c.GetInt64Value()), nil - case *exprpb.Constant_NullValue: - return p.adapter.NativeToValue(c.GetNullValue()), nil - case *exprpb.Constant_StringValue: - return p.adapter.NativeToValue(c.GetStringValue()), nil - case *exprpb.Constant_TimestampValue: - return p.adapter.NativeToValue(c.GetTimestampValue().AsTime()), nil - case *exprpb.Constant_Uint64Value: - return p.adapter.NativeToValue(c.GetUint64Value()), nil - } - return nil, fmt.Errorf("unknown constant type: %v", c) +func (p *planner) planConst(expr ast.Expr) (Interpretable, error) { + return NewConstValue(expr.ID(), expr.AsLiteral()), nil } // resolveTypeName takes a qualified string constructed at parse time, applies the proto @@ -687,17 +635,20 @@ func (p *planner) resolveTypeName(typeName string) (string, bool) { // - The target expression may only consist of ident and select expressions. // - The function is declared in the environment using its fully-qualified name. // - The fully-qualified function name matches the string serialized target value. -func (p *planner) resolveFunction(expr *exprpb.Expr) (*exprpb.Expr, string, string) { +func (p *planner) resolveFunction(expr ast.Expr) (ast.Expr, string, string) { // Note: similar logic exists within the `checker/checker.go`. If making changes here // please consider the impact on checker.go and consolidate implementations or mirror code // as appropriate. 
- call := expr.GetCallExpr() - target := call.GetTarget() - fnName := call.GetFunction() + call := expr.AsCall() + var target ast.Expr = nil + if call.IsMemberFunction() { + target = call.Target() + } + fnName := call.FunctionName() // Checked expressions always have a reference map entry, and _should_ have the fully qualified // function name as the fnName value. - oRef, hasOverload := p.refMap[expr.GetId()] + oRef, hasOverload := p.refMap[expr.ID()] if hasOverload { if len(oRef.OverloadIDs) == 1 { return target, fnName, oRef.OverloadIDs[0] @@ -771,16 +722,30 @@ func (p *planner) relativeAttr(id int64, eval Interpretable, opt bool) (Interpre // toQualifiedName converts an expression AST into a qualified name if possible, with a boolean // 'found' value that indicates if the conversion is successful. -func (p *planner) toQualifiedName(operand *exprpb.Expr) (string, bool) { +func (p *planner) toQualifiedName(operand ast.Expr) (string, bool) { // If the checker identified the expression as an attribute by the type-checker, then it can't // possibly be part of qualified name in a namespace. - _, isAttr := p.refMap[operand.GetId()] + _, isAttr := p.refMap[operand.ID()] if isAttr { return "", false } // Since functions cannot be both namespaced and receiver functions, if the operand is not an // qualified variable name, return the (possibly) qualified name given the expressions. - return containers.ToQualifiedName(operand) + switch operand.Kind() { + case ast.IdentKind: + id := operand.AsIdent() + return id, true + case ast.SelectKind: + sel := operand.AsSelect() + // Test only expressions are not valid as qualified names. + if sel.IsTestOnly() { + return "", false + } + if qual, found := p.toQualifiedName(sel.Operand()); found { + return qual + "." + sel.FieldName(), true + } + } + return "", false } func stripLeadingDot(name string) string { diff --git a/vendor/github.com/google/cel-go/interpreter/prune.go b/vendor/github.com/google/cel-go/interpreter/prune.go index b8834b1cb8..410d80dc43 100644 --- a/vendor/github.com/google/cel-go/interpreter/prune.go +++ b/vendor/github.com/google/cel-go/interpreter/prune.go @@ -15,19 +15,18 @@ package interpreter import ( + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - structpb "google.golang.org/protobuf/types/known/structpb" ) type astPruner struct { - expr *exprpb.Expr - macroCalls map[int64]*exprpb.Expr + ast.ExprFactory + expr ast.Expr + macroCalls map[int64]ast.Expr state EvalState nextExprID int64 } @@ -67,84 +66,44 @@ type astPruner struct { // compiled and constant folded expressions, but is not willing to constant // fold(and thus cache results of) some external calls, then they can prepare // the overloads accordingly. 
-func PruneAst(expr *exprpb.Expr, macroCalls map[int64]*exprpb.Expr, state EvalState) *exprpb.ParsedExpr { +func PruneAst(expr ast.Expr, macroCalls map[int64]ast.Expr, state EvalState) *ast.AST { pruneState := NewEvalState() for _, id := range state.IDs() { v, _ := state.Value(id) pruneState.SetValue(id, v) } pruner := &astPruner{ - expr: expr, - macroCalls: macroCalls, - state: pruneState, - nextExprID: getMaxID(expr)} + ExprFactory: ast.NewExprFactory(), + expr: expr, + macroCalls: macroCalls, + state: pruneState, + nextExprID: getMaxID(expr)} newExpr, _ := pruner.maybePrune(expr) - return &exprpb.ParsedExpr{ - Expr: newExpr, - SourceInfo: &exprpb.SourceInfo{MacroCalls: pruner.macroCalls}, - } -} - -func (p *astPruner) createLiteral(id int64, val *exprpb.Constant) *exprpb.Expr { - return &exprpb.Expr{ - Id: id, - ExprKind: &exprpb.Expr_ConstExpr{ - ConstExpr: val, - }, + newInfo := ast.NewSourceInfo(nil) + for id, call := range pruner.macroCalls { + newInfo.SetMacroCall(id, call) } + return ast.NewAST(newExpr, newInfo) } -func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, bool) { +func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (ast.Expr, bool) { switch v := val.(type) { - case types.Bool: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: bool(v)}}), true - case types.Bytes: + case types.Bool, types.Bytes, types.Double, types.Int, types.Null, types.String, types.Uint: p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: []byte(v)}}), true - case types.Double: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: float64(v)}}), true + return p.NewLiteral(id, val), true case types.Duration: p.state.SetValue(id, val) - durationString := string(v.ConvertToType(types.StringType).(types.String)) - return &exprpb.Expr{ - Id: id, - ExprKind: &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{ - Function: overloads.TypeConvertDuration, - Args: []*exprpb.Expr{ - p.createLiteral(p.nextID(), - &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: durationString}}), - }, - }, - }, - }, true - case types.Int: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: int64(v)}}), true - case types.Uint: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: uint64(v)}}), true - case types.String: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: string(v)}}), true - case types.Null: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: v.Value().(structpb.NullValue)}}), true + durationString := v.ConvertToType(types.StringType).(types.String) + return p.NewCall(id, overloads.TypeConvertDuration, p.NewLiteral(p.nextID(), durationString)), true + case types.Timestamp: + timestampString := v.ConvertToType(types.StringType).(types.String) + return p.NewCall(id, overloads.TypeConvertTimestamp, p.NewLiteral(p.nextID(), timestampString)), true } // Attempt to build a list literal. 
if list, isList := val.(traits.Lister); isList { sz := list.Size().(types.Int) - elemExprs := make([]*exprpb.Expr, sz) + elemExprs := make([]ast.Expr, sz) for i := types.Int(0); i < sz; i++ { elem := list.Get(i) if types.IsUnknownOrError(elem) { @@ -157,20 +116,13 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo elemExprs[i] = elemExpr } p.state.SetValue(id, val) - return &exprpb.Expr{ - Id: id, - ExprKind: &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{ - Elements: elemExprs, - }, - }, - }, true + return p.NewList(id, elemExprs, []int32{}), true } // Create a map literal if possible. if mp, isMap := val.(traits.Mapper); isMap { it := mp.Iterator() - entries := make([]*exprpb.Expr_CreateStruct_Entry, mp.Size().(types.Int)) + entries := make([]ast.EntryExpr, mp.Size().(types.Int)) i := 0 for it.HasNext() != types.False { key := it.Next() @@ -186,25 +138,12 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo if !ok { return nil, false } - entry := &exprpb.Expr_CreateStruct_Entry{ - Id: p.nextID(), - KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{ - MapKey: keyExpr, - }, - Value: valExpr, - } + entry := p.NewMapEntry(p.nextID(), keyExpr, valExpr, false) entries[i] = entry i++ } p.state.SetValue(id, val) - return &exprpb.Expr{ - Id: id, - ExprKind: &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{ - Entries: entries, - }, - }, - }, true + return p.NewMap(id, entries), true } // TODO(issues/377) To construct message literals, the type provider will need to support @@ -212,215 +151,206 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo return nil, false } -func (p *astPruner) maybePruneOptional(elem *exprpb.Expr) (*exprpb.Expr, bool) { - elemVal, found := p.value(elem.GetId()) +func (p *astPruner) maybePruneOptional(elem ast.Expr) (ast.Expr, bool) { + elemVal, found := p.value(elem.ID()) if found && elemVal.Type() == types.OptionalType { opt := elemVal.(*types.Optional) if !opt.HasValue() { return nil, true } - if newElem, pruned := p.maybeCreateLiteral(elem.GetId(), opt.GetValue()); pruned { + if newElem, pruned := p.maybeCreateLiteral(elem.ID(), opt.GetValue()); pruned { return newElem, true } } return elem, false } -func (p *astPruner) maybePruneIn(node *exprpb.Expr) (*exprpb.Expr, bool) { +func (p *astPruner) maybePruneIn(node ast.Expr) (ast.Expr, bool) { // elem in list - call := node.GetCallExpr() - val, exists := p.maybeValue(call.GetArgs()[1].GetId()) + call := node.AsCall() + val, exists := p.maybeValue(call.Args()[1].ID()) if !exists { return nil, false } if sz, ok := val.(traits.Sizer); ok && sz.Size() == types.IntZero { - return p.maybeCreateLiteral(node.GetId(), types.False) + return p.maybeCreateLiteral(node.ID(), types.False) } return nil, false } -func (p *astPruner) maybePruneLogicalNot(node *exprpb.Expr) (*exprpb.Expr, bool) { - call := node.GetCallExpr() - arg := call.GetArgs()[0] - val, exists := p.maybeValue(arg.GetId()) +func (p *astPruner) maybePruneLogicalNot(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() + arg := call.Args()[0] + val, exists := p.maybeValue(arg.ID()) if !exists { return nil, false } if b, ok := val.(types.Bool); ok { - return p.maybeCreateLiteral(node.GetId(), !b) + return p.maybeCreateLiteral(node.ID(), !b) } return nil, false } -func (p *astPruner) maybePruneOr(node *exprpb.Expr) (*exprpb.Expr, bool) { - call := node.GetCallExpr() +func (p *astPruner) maybePruneOr(node ast.Expr) (ast.Expr, bool) { + call := 
node.AsCall() // We know result is unknown, so we have at least one unknown arg // and if one side is a known value, we know we can ignore it. - if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists { + if v, exists := p.maybeValue(call.Args()[0].ID()); exists { if v == types.True { - return p.maybeCreateLiteral(node.GetId(), types.True) + return p.maybeCreateLiteral(node.ID(), types.True) } - return call.GetArgs()[1], true + return call.Args()[1], true } - if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists { + if v, exists := p.maybeValue(call.Args()[1].ID()); exists { if v == types.True { - return p.maybeCreateLiteral(node.GetId(), types.True) + return p.maybeCreateLiteral(node.ID(), types.True) } - return call.GetArgs()[0], true + return call.Args()[0], true } return nil, false } -func (p *astPruner) maybePruneAnd(node *exprpb.Expr) (*exprpb.Expr, bool) { - call := node.GetCallExpr() +func (p *astPruner) maybePruneAnd(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() // We know result is unknown, so we have at least one unknown arg // and if one side is a known value, we know we can ignore it. - if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists { + if v, exists := p.maybeValue(call.Args()[0].ID()); exists { if v == types.False { - return p.maybeCreateLiteral(node.GetId(), types.False) + return p.maybeCreateLiteral(node.ID(), types.False) } - return call.GetArgs()[1], true + return call.Args()[1], true } - if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists { + if v, exists := p.maybeValue(call.Args()[1].ID()); exists { if v == types.False { - return p.maybeCreateLiteral(node.GetId(), types.False) + return p.maybeCreateLiteral(node.ID(), types.False) } - return call.GetArgs()[0], true + return call.Args()[0], true } return nil, false } -func (p *astPruner) maybePruneConditional(node *exprpb.Expr) (*exprpb.Expr, bool) { - call := node.GetCallExpr() - cond, exists := p.maybeValue(call.GetArgs()[0].GetId()) +func (p *astPruner) maybePruneConditional(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() + cond, exists := p.maybeValue(call.Args()[0].ID()) if !exists { return nil, false } if cond.Value().(bool) { - return call.GetArgs()[1], true + return call.Args()[1], true } - return call.GetArgs()[2], true + return call.Args()[2], true } -func (p *astPruner) maybePruneFunction(node *exprpb.Expr) (*exprpb.Expr, bool) { - if _, exists := p.value(node.GetId()); !exists { +func (p *astPruner) maybePruneFunction(node ast.Expr) (ast.Expr, bool) { + if _, exists := p.value(node.ID()); !exists { return nil, false } - call := node.GetCallExpr() - if call.Function == operators.LogicalOr { + call := node.AsCall() + if call.FunctionName() == operators.LogicalOr { return p.maybePruneOr(node) } - if call.Function == operators.LogicalAnd { + if call.FunctionName() == operators.LogicalAnd { return p.maybePruneAnd(node) } - if call.Function == operators.Conditional { + if call.FunctionName() == operators.Conditional { return p.maybePruneConditional(node) } - if call.Function == operators.In { + if call.FunctionName() == operators.In { return p.maybePruneIn(node) } - if call.Function == operators.LogicalNot { + if call.FunctionName() == operators.LogicalNot { return p.maybePruneLogicalNot(node) } return nil, false } -func (p *astPruner) maybePrune(node *exprpb.Expr) (*exprpb.Expr, bool) { +func (p *astPruner) maybePrune(node ast.Expr) (ast.Expr, bool) { return p.prune(node) } -func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { 
+func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) { if node == nil { return node, false } - val, valueExists := p.maybeValue(node.GetId()) + val, valueExists := p.maybeValue(node.ID()) if valueExists { - if newNode, ok := p.maybeCreateLiteral(node.GetId(), val); ok { - delete(p.macroCalls, node.GetId()) + if newNode, ok := p.maybeCreateLiteral(node.ID(), val); ok { + delete(p.macroCalls, node.ID()) return newNode, true } } - if macro, found := p.macroCalls[node.GetId()]; found { + if macro, found := p.macroCalls[node.ID()]; found { // Ensure that intermediate values for the comprehension are cleared during pruning - compre := node.GetComprehensionExpr() - if compre != nil { - visit(macro, clearIterVarVisitor(compre.IterVar, p.state)) + if node.Kind() == ast.ComprehensionKind { + compre := node.AsComprehension() + visit(macro, clearIterVarVisitor(compre.IterVar(), p.state)) } // prune the expression in terms of the macro call instead of the expanded form. if newMacro, pruned := p.prune(macro); pruned { - p.macroCalls[node.GetId()] = newMacro + p.macroCalls[node.ID()] = newMacro } } // We have either an unknown/error value, or something we don't want to // transform, or expression was not evaluated. If possible, drill down // more. - switch node.GetExprKind().(type) { - case *exprpb.Expr_SelectExpr: - if operand, pruned := p.maybePrune(node.GetSelectExpr().GetOperand()); pruned { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_SelectExpr{ - SelectExpr: &exprpb.Expr_Select{ - Operand: operand, - Field: node.GetSelectExpr().GetField(), - TestOnly: node.GetSelectExpr().GetTestOnly(), - }, - }, - }, true - } - case *exprpb.Expr_CallExpr: - var prunedCall bool - call := node.GetCallExpr() - args := call.GetArgs() - newArgs := make([]*exprpb.Expr, len(args)) - newCall := &exprpb.Expr_Call{ - Function: call.GetFunction(), - Target: call.GetTarget(), - Args: newArgs, - } - for i, arg := range args { - newArgs[i] = arg - if newArg, prunedArg := p.maybePrune(arg); prunedArg { - prunedCall = true - newArgs[i] = newArg + switch node.Kind() { + case ast.SelectKind: + sel := node.AsSelect() + if operand, isPruned := p.maybePrune(sel.Operand()); isPruned { + if sel.IsTestOnly() { + return p.NewPresenceTest(node.ID(), operand, sel.FieldName()), true } + return p.NewSelect(node.ID(), operand, sel.FieldName()), true } - if newTarget, prunedTarget := p.maybePrune(call.GetTarget()); prunedTarget { - prunedCall = true - newCall.Target = newTarget + case ast.CallKind: + argsPruned := false + call := node.AsCall() + args := call.Args() + newArgs := make([]ast.Expr, len(args)) + for i, a := range args { + newArgs[i] = a + if arg, isPruned := p.maybePrune(a); isPruned { + argsPruned = true + newArgs[i] = arg + } } - newNode := &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_CallExpr{ - CallExpr: newCall, - }, + if !call.IsMemberFunction() { + newCall := p.NewCall(node.ID(), call.FunctionName(), newArgs...) + if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned { + return prunedCall, true + } + return newCall, argsPruned } - if newExpr, pruned := p.maybePruneFunction(newNode); pruned { - newExpr, _ = p.maybePrune(newExpr) - return newExpr, true + newTarget := call.Target() + targetPruned := false + if prunedTarget, isPruned := p.maybePrune(call.Target()); isPruned { + targetPruned = true + newTarget = prunedTarget } - if prunedCall { - return newNode, true + newCall := p.NewMemberCall(node.ID(), call.FunctionName(), newTarget, newArgs...) 
+ if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned { + return prunedCall, true } - case *exprpb.Expr_ListExpr: - elems := node.GetListExpr().GetElements() - optIndices := node.GetListExpr().GetOptionalIndices() + return newCall, targetPruned || argsPruned + case ast.ListKind: + l := node.AsList() + elems := l.Elements() + optIndices := l.OptionalIndices() optIndexMap := map[int32]bool{} for _, i := range optIndices { optIndexMap[i] = true } newOptIndexMap := make(map[int32]bool, len(optIndexMap)) - newElems := make([]*exprpb.Expr, 0, len(elems)) - var prunedList bool - + newElems := make([]ast.Expr, 0, len(elems)) + var listPruned bool prunedIdx := 0 for i, elem := range elems { _, isOpt := optIndexMap[int32(i)] if isOpt { newElem, pruned := p.maybePruneOptional(elem) if pruned { - prunedList = true + listPruned = true if newElem != nil { newElems = append(newElems, newElem) prunedIdx++ @@ -431,7 +361,7 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { } if newElem, prunedElem := p.maybePrune(elem); prunedElem { newElems = append(newElems, newElem) - prunedList = true + listPruned = true } else { newElems = append(newElems, elem) } @@ -443,76 +373,64 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { optIndices[idx] = i idx++ } - if prunedList { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{ - Elements: newElems, - OptionalIndices: optIndices, - }, - }, - }, true + if listPruned { + return p.NewList(node.ID(), newElems, optIndices), true } - case *exprpb.Expr_StructExpr: - var prunedStruct bool - entries := node.GetStructExpr().GetEntries() - messageType := node.GetStructExpr().GetMessageName() - newEntries := make([]*exprpb.Expr_CreateStruct_Entry, len(entries)) + case ast.MapKind: + var mapPruned bool + m := node.AsMap() + entries := m.Entries() + newEntries := make([]ast.EntryExpr, len(entries)) for i, entry := range entries { newEntries[i] = entry - newKey, prunedKey := p.maybePrune(entry.GetMapKey()) - newValue, prunedValue := p.maybePrune(entry.GetValue()) - if !prunedKey && !prunedValue { + e := entry.AsMapEntry() + newKey, keyPruned := p.maybePrune(e.Key()) + newValue, valuePruned := p.maybePrune(e.Value()) + if !keyPruned && !valuePruned { continue } - prunedStruct = true - newEntry := &exprpb.Expr_CreateStruct_Entry{ - Value: newValue, - } - if messageType != "" { - newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_FieldKey{ - FieldKey: entry.GetFieldKey(), - } - } else { - newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_MapKey{ - MapKey: newKey, - } - } - newEntry.OptionalEntry = entry.GetOptionalEntry() + mapPruned = true + newEntry := p.NewMapEntry(entry.ID(), newKey, newValue, e.IsOptional()) newEntries[i] = newEntry } - if prunedStruct { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{ - MessageName: messageType, - Entries: newEntries, - }, - }, - }, true + if mapPruned { + return p.NewMap(node.ID(), newEntries), true } - case *exprpb.Expr_ComprehensionExpr: - compre := node.GetComprehensionExpr() + case ast.StructKind: + var structPruned bool + obj := node.AsStruct() + fields := obj.Fields() + newFields := make([]ast.EntryExpr, len(fields)) + for i, field := range fields { + newFields[i] = field + f := field.AsStructField() + newValue, prunedValue := p.maybePrune(f.Value()) + if !prunedValue { + continue + } + structPruned = true + newEntry := p.NewStructField(field.ID(), 
f.Name(), newValue, f.IsOptional()) + newFields[i] = newEntry + } + if structPruned { + return p.NewStruct(node.ID(), obj.TypeName(), newFields), true + } + case ast.ComprehensionKind: + compre := node.AsComprehension() // Only the range of the comprehension is pruned since the state tracking only records // the last iteration of the comprehension and not each step in the evaluation which // means that the any residuals computed in between might be inaccurate. - if newRange, pruned := p.maybePrune(compre.GetIterRange()); pruned { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_ComprehensionExpr{ - ComprehensionExpr: &exprpb.Expr_Comprehension{ - IterVar: compre.GetIterVar(), - IterRange: newRange, - AccuVar: compre.GetAccuVar(), - AccuInit: compre.GetAccuInit(), - LoopCondition: compre.GetLoopCondition(), - LoopStep: compre.GetLoopStep(), - Result: compre.GetResult(), - }, - }, - }, true + if newRange, pruned := p.maybePrune(compre.IterRange()); pruned { + return p.NewComprehension( + node.ID(), + newRange, + compre.IterVar(), + compre.AccuVar(), + compre.AccuInit(), + compre.LoopCondition(), + compre.LoopStep(), + compre.Result(), + ), true } } return node, false @@ -539,12 +457,12 @@ func (p *astPruner) nextID() int64 { type astVisitor struct { // visitEntry is called on every expr node, including those within a map/struct entry. - visitExpr func(expr *exprpb.Expr) + visitExpr func(expr ast.Expr) // visitEntry is called before entering the key, value of a map/struct entry. - visitEntry func(entry *exprpb.Expr_CreateStruct_Entry) + visitEntry func(entry ast.EntryExpr) } -func getMaxID(expr *exprpb.Expr) int64 { +func getMaxID(expr ast.Expr) int64 { maxID := int64(1) visit(expr, maxIDVisitor(&maxID)) return maxID @@ -552,10 +470,9 @@ func getMaxID(expr *exprpb.Expr) int64 { func clearIterVarVisitor(varName string, state EvalState) astVisitor { return astVisitor{ - visitExpr: func(e *exprpb.Expr) { - ident := e.GetIdentExpr() - if ident != nil && ident.GetName() == varName { - state.SetValue(e.GetId(), nil) + visitExpr: func(e ast.Expr) { + if e.Kind() == ast.IdentKind && e.AsIdent() == varName { + state.SetValue(e.ID(), nil) } }, } @@ -563,56 +480,63 @@ func clearIterVarVisitor(varName string, state EvalState) astVisitor { func maxIDVisitor(maxID *int64) astVisitor { return astVisitor{ - visitExpr: func(e *exprpb.Expr) { - if e.GetId() >= *maxID { - *maxID = e.GetId() + 1 + visitExpr: func(e ast.Expr) { + if e.ID() >= *maxID { + *maxID = e.ID() + 1 } }, - visitEntry: func(e *exprpb.Expr_CreateStruct_Entry) { - if e.GetId() >= *maxID { - *maxID = e.GetId() + 1 + visitEntry: func(e ast.EntryExpr) { + if e.ID() >= *maxID { + *maxID = e.ID() + 1 } }, } } -func visit(expr *exprpb.Expr, visitor astVisitor) { - exprs := []*exprpb.Expr{expr} +func visit(expr ast.Expr, visitor astVisitor) { + exprs := []ast.Expr{expr} for len(exprs) != 0 { e := exprs[0] if visitor.visitExpr != nil { visitor.visitExpr(e) } exprs = exprs[1:] - switch e.GetExprKind().(type) { - case *exprpb.Expr_SelectExpr: - exprs = append(exprs, e.GetSelectExpr().GetOperand()) - case *exprpb.Expr_CallExpr: - call := e.GetCallExpr() - if call.GetTarget() != nil { - exprs = append(exprs, call.GetTarget()) + switch e.Kind() { + case ast.SelectKind: + exprs = append(exprs, e.AsSelect().Operand()) + case ast.CallKind: + call := e.AsCall() + if call.Target() != nil { + exprs = append(exprs, call.Target()) } - exprs = append(exprs, call.GetArgs()...) 
- case *exprpb.Expr_ComprehensionExpr: - compre := e.GetComprehensionExpr() + exprs = append(exprs, call.Args()...) + case ast.ComprehensionKind: + compre := e.AsComprehension() exprs = append(exprs, - compre.GetIterRange(), - compre.GetAccuInit(), - compre.GetLoopCondition(), - compre.GetLoopStep(), - compre.GetResult()) - case *exprpb.Expr_ListExpr: - list := e.GetListExpr() - exprs = append(exprs, list.GetElements()...) - case *exprpb.Expr_StructExpr: - for _, entry := range e.GetStructExpr().GetEntries() { + compre.IterRange(), + compre.AccuInit(), + compre.LoopCondition(), + compre.LoopStep(), + compre.Result()) + case ast.ListKind: + list := e.AsList() + exprs = append(exprs, list.Elements()...) + case ast.MapKind: + for _, entry := range e.AsMap().Entries() { + e := entry.AsMapEntry() if visitor.visitEntry != nil { visitor.visitEntry(entry) } - if entry.GetMapKey() != nil { - exprs = append(exprs, entry.GetMapKey()) + exprs = append(exprs, e.Key()) + exprs = append(exprs, e.Value()) + } + case ast.StructKind: + for _, entry := range e.AsStruct().Fields() { + f := entry.AsStructField() + if visitor.visitEntry != nil { + visitor.visitEntry(entry) } - exprs = append(exprs, entry.GetValue()) + exprs = append(exprs, f.Value()) } } } diff --git a/vendor/github.com/google/cel-go/parser/BUILD.bazel b/vendor/github.com/google/cel-go/parser/BUILD.bazel index 67ecc95543..97bc9bd434 100644 --- a/vendor/github.com/google/cel-go/parser/BUILD.bazel +++ b/vendor/github.com/google/cel-go/parser/BUILD.bazel @@ -20,10 +20,13 @@ go_library( visibility = ["//visibility:public"], deps = [ "//common:go_default_library", + "//common/ast:go_default_library", "//common/operators:go_default_library", "//common/runes:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", "//parser/gen:go_default_library", - "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", + "@com_github_antlr4_go_antlr_v4//:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", @@ -43,10 +46,12 @@ go_test( ":go_default_library", ], deps = [ + "//common/ast:go_default_library", "//common/debug:go_default_library", + "//common/types:go_default_library", "//parser/gen:go_default_library", "//test:go_default_library", - "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", + "@com_github_antlr4_go_antlr_v4//:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//testing/protocmp:go_default_library", ], diff --git a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel index 654d1de7aa..e704334834 100644 --- a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel +++ b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel @@ -21,6 +21,6 @@ go_library( ], importpath = "github.com/google/cel-go/parser/gen", deps = [ - "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", + "@com_github_antlr4_go_antlr_v4//:go_default_library", ], ) diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go index 0247f470a7..c49d038676 100644 --- a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go +++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go @@ -1,7 +1,7 @@ -// Code 
generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. package gen // CEL -import "github.com/antlr/antlr4/runtime/Go/antlr/v4" +import "github.com/antlr4-go/antlr/v4" // BaseCELListener is a complete listener for a parse tree produced by CELParser. type BaseCELListener struct{} diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go index 52a7f4dc57..b2c0783d39 100644 --- a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go +++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go @@ -1,7 +1,8 @@ -// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. package gen // CEL -import "github.com/antlr/antlr4/runtime/Go/antlr/v4" +import "github.com/antlr4-go/antlr/v4" + type BaseCELVisitor struct { *antlr.BaseParseTreeVisitor diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go index 98ddc06d0b..e026cc46f0 100644 --- a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go +++ b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go @@ -1,280 +1,278 @@ -// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. 
package gen - import ( "fmt" - "sync" + "sync" "unicode" - - "github.com/antlr/antlr4/runtime/Go/antlr/v4" + "github.com/antlr4-go/antlr/v4" ) - // Suppress unused import error var _ = fmt.Printf var _ = sync.Once{} var _ = unicode.IsLetter + type CELLexer struct { *antlr.BaseLexer channelNames []string - modeNames []string + modeNames []string // TODO: EOF string } -var cellexerLexerStaticData struct { - once sync.Once - serializedATN []int32 - channelNames []string - modeNames []string - literalNames []string - symbolicNames []string - ruleNames []string - predictionContextCache *antlr.PredictionContextCache - atn *antlr.ATN - decisionToDFA []*antlr.DFA +var CELLexerLexerStaticData struct { + once sync.Once + serializedATN []int32 + ChannelNames []string + ModeNames []string + LiteralNames []string + SymbolicNames []string + RuleNames []string + PredictionContextCache *antlr.PredictionContextCache + atn *antlr.ATN + decisionToDFA []*antlr.DFA } func cellexerLexerInit() { - staticData := &cellexerLexerStaticData - staticData.channelNames = []string{ - "DEFAULT_TOKEN_CHANNEL", "HIDDEN", - } - staticData.modeNames = []string{ - "DEFAULT_MODE", - } - staticData.literalNames = []string{ - "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", - "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", - "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", - } - staticData.symbolicNames = []string{ - "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", - "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", - "STRING", "BYTES", "IDENTIFIER", - } - staticData.ruleNames = []string{ - "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", - "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW", - "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ", - "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", - "BYTES", "IDENTIFIER", - } - staticData.predictionContextCache = antlr.NewPredictionContextCache() - staticData.serializedATN = []int32{ - 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, - 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, - 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, - 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, - 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, - 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, - 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, - 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, - 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, - 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, - 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, - 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, - 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 
18, 1, - 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, - 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, - 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, - 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, - 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, - 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, - 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, - 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, - 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, - 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, - 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, - 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, - 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, - 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, - 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, - 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, - 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, - 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, - 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, - 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, - 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, - 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, - 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, - 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, - 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, - 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, - 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, - 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, - 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, - 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, - 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, - 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, - 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, - 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, - 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, - 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, - 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, - 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, - 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, - 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, - 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, - 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, - 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, - 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, - 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, - 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, - 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, - 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 
33, 1, 0, 0, 0, 0, 35, 1, 0, 0, - 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, - 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, - 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, - 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, - 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, - 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, - 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, - 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, - 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, - 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, - 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, - 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, - 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, - 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183, - 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, - 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, - 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, - 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, - 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, - 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, - 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, - 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, - 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, - 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, - 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, - 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, - 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, - 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, - 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, - 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, - 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, - 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, - 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, - 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, - 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, - 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, - 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0, - 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, - 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, - 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, - 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, - 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, - 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, - 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, - 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, - 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, - 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, - 192, 
3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, - 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, - 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, - 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, - 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, - 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, - 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, - 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, - 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, - 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, - 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, - 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, - 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, - 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, - 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, - 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, - 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, - 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, - 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, - 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, - 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, - 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, - 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, - 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, - 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, - 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, - 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, - 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, - 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, - 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, - 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, - 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, - 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, - 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, - 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, - 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, - 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, - 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, - 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1, - 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, - 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, - 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, - 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, - 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, - 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, - 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, - 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, - 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, - 0, 
0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, - 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, - 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, - 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, - 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, - 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, - 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, - 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, - 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, - 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, - 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, - 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, - 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, - 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, - 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0, - 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, - 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, - 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, - 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, - 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, - 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, - 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, - 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, - 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, - 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, - 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, - 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, - 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, - 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, - 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, - 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, - 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, - 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, - 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, - 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, - 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, - 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, - 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, - 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, - 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294, - 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, - 413, 418, 420, 1, 0, 1, 0, - } - deserializer := antlr.NewATNDeserializer(nil) - staticData.atn = deserializer.Deserialize(staticData.serializedATN) - atn := staticData.atn - staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) - decisionToDFA := staticData.decisionToDFA - for index, state := range atn.DecisionToState { - decisionToDFA[index] = antlr.NewDFA(state, index) - } + staticData := &CELLexerLexerStaticData + staticData.ChannelNames = []string{ + 
"DEFAULT_TOKEN_CHANNEL", "HIDDEN", + } + staticData.ModeNames = []string{ + "DEFAULT_MODE", + } + staticData.LiteralNames = []string{ + "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", + "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", + "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", + } + staticData.SymbolicNames = []string{ + "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", + "STRING", "BYTES", "IDENTIFIER", + } + staticData.RuleNames = []string{ + "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW", + "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ", + "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", + "BYTES", "IDENTIFIER", + } + staticData.PredictionContextCache = antlr.NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, + 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, + 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, + 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, + 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, + 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, + 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, + 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, + 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, + 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, + 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, + 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, + 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, + 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, + 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, + 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, + 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, + 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, + 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, + 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, + 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, + 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, + 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, + 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, + 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, + 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, + 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, + 41, 3, 41, 
273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, + 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, + 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, + 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, + 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, + 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, + 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, + 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, + 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, + 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, + 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, + 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, + 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, + 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, + 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, + 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, + 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, + 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, + 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, + 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, + 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, + 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, + 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, + 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, + 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, + 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, + 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, + 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, + 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, + 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, + 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, + 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, + 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, + 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, + 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, + 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, + 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, + 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, + 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, + 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, + 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, + 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, + 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, + 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, + 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, + 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, + 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, + 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 
65, 183, + 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, + 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, + 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, + 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, + 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, + 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, + 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, + 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, + 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, + 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, + 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, + 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, + 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, + 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, + 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, + 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, + 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, + 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, + 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, + 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, + 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, + 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, + 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0, + 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, + 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, + 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, + 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, + 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, + 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, + 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, + 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, + 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, + 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, + 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, + 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, + 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, + 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, + 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, + 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, + 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, + 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, + 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, + 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, + 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, + 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, + 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, + 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, + 227, 226, 1, 0, 0, 0, 228, 229, 1, 
0, 0, 0, 229, 227, 1, 0, 0, 0, 229, + 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, + 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, + 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, + 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, + 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, + 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, + 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, + 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, + 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, + 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, + 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, + 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, + 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, + 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, + 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, + 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, + 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, + 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, + 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, + 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, + 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, + 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, + 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, + 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1, + 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, + 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, + 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, + 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, + 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, + 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, + 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, + 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, + 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, + 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, + 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, + 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, + 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, + 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, + 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, + 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, + 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, + 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, + 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, + 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, + 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, + 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, + 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, + 34, 0, 0, 358, 360, 8, 13, 0, 0, 
359, 358, 1, 0, 0, 0, 360, 363, 1, 0, + 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, + 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, + 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, + 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, + 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, + 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, + 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, + 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, + 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, + 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, + 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, + 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, + 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, + 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, + 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, + 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, + 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, + 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, + 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, + 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, + 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, + 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, + 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, + 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, + 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294, + 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, + 413, 418, 420, 1, 0, 1, 0, +} + deserializer := antlr.NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(state, index) + } } // CELLexerInit initializes any static state used to implement CELLexer. By default the @@ -282,22 +280,22 @@ func cellexerLexerInit() { // NewCELLexer(). You can call this function if you wish to initialize the static state ahead // of time. func CELLexerInit() { - staticData := &cellexerLexerStaticData - staticData.once.Do(cellexerLexerInit) + staticData := &CELLexerLexerStaticData + staticData.once.Do(cellexerLexerInit) } // NewCELLexer produces a new lexer instance for the optional input antlr.CharStream. 
func NewCELLexer(input antlr.CharStream) *CELLexer { - CELLexerInit() + CELLexerInit() l := new(CELLexer) l.BaseLexer = antlr.NewBaseLexer(input) - staticData := &cellexerLexerStaticData - l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) - l.channelNames = staticData.channelNames - l.modeNames = staticData.modeNames - l.RuleNames = staticData.ruleNames - l.LiteralNames = staticData.literalNames - l.SymbolicNames = staticData.symbolicNames + staticData := &CELLexerLexerStaticData + l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache) + l.channelNames = staticData.ChannelNames + l.modeNames = staticData.ModeNames + l.RuleNames = staticData.RuleNames + l.LiteralNames = staticData.LiteralNames + l.SymbolicNames = staticData.SymbolicNames l.GrammarFileName = "CEL.g4" // TODO: l.EOF = antlr.TokenEOF @@ -306,40 +304,41 @@ func NewCELLexer(input antlr.CharStream) *CELLexer { // CELLexer tokens. const ( - CELLexerEQUALS = 1 - CELLexerNOT_EQUALS = 2 - CELLexerIN = 3 - CELLexerLESS = 4 - CELLexerLESS_EQUALS = 5 + CELLexerEQUALS = 1 + CELLexerNOT_EQUALS = 2 + CELLexerIN = 3 + CELLexerLESS = 4 + CELLexerLESS_EQUALS = 5 CELLexerGREATER_EQUALS = 6 - CELLexerGREATER = 7 - CELLexerLOGICAL_AND = 8 - CELLexerLOGICAL_OR = 9 - CELLexerLBRACKET = 10 - CELLexerRPRACKET = 11 - CELLexerLBRACE = 12 - CELLexerRBRACE = 13 - CELLexerLPAREN = 14 - CELLexerRPAREN = 15 - CELLexerDOT = 16 - CELLexerCOMMA = 17 - CELLexerMINUS = 18 - CELLexerEXCLAM = 19 - CELLexerQUESTIONMARK = 20 - CELLexerCOLON = 21 - CELLexerPLUS = 22 - CELLexerSTAR = 23 - CELLexerSLASH = 24 - CELLexerPERCENT = 25 - CELLexerCEL_TRUE = 26 - CELLexerCEL_FALSE = 27 - CELLexerNUL = 28 - CELLexerWHITESPACE = 29 - CELLexerCOMMENT = 30 - CELLexerNUM_FLOAT = 31 - CELLexerNUM_INT = 32 - CELLexerNUM_UINT = 33 - CELLexerSTRING = 34 - CELLexerBYTES = 35 - CELLexerIDENTIFIER = 36 + CELLexerGREATER = 7 + CELLexerLOGICAL_AND = 8 + CELLexerLOGICAL_OR = 9 + CELLexerLBRACKET = 10 + CELLexerRPRACKET = 11 + CELLexerLBRACE = 12 + CELLexerRBRACE = 13 + CELLexerLPAREN = 14 + CELLexerRPAREN = 15 + CELLexerDOT = 16 + CELLexerCOMMA = 17 + CELLexerMINUS = 18 + CELLexerEXCLAM = 19 + CELLexerQUESTIONMARK = 20 + CELLexerCOLON = 21 + CELLexerPLUS = 22 + CELLexerSTAR = 23 + CELLexerSLASH = 24 + CELLexerPERCENT = 25 + CELLexerCEL_TRUE = 26 + CELLexerCEL_FALSE = 27 + CELLexerNUL = 28 + CELLexerWHITESPACE = 29 + CELLexerCOMMENT = 30 + CELLexerNUM_FLOAT = 31 + CELLexerNUM_INT = 32 + CELLexerNUM_UINT = 33 + CELLexerSTRING = 34 + CELLexerBYTES = 35 + CELLexerIDENTIFIER = 36 ) + diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go index 73b7f1d39f..22dc997898 100644 --- a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go +++ b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go @@ -1,7 +1,8 @@ -// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. package gen // CEL -import "github.com/antlr/antlr4/runtime/Go/antlr/v4" +import "github.com/antlr4-go/antlr/v4" + // CELListener is a complete listener for a parse tree produced by CELParser. 
type CELListener interface { diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go index 0cb6c8eae8..35334af615 100644 --- a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go +++ b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go @@ -1,12 +1,12 @@ -// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. package gen // CEL import ( "fmt" "strconv" - "sync" + "sync" - "github.com/antlr/antlr4/runtime/Go/antlr/v4" + "github.com/antlr4-go/antlr/v4" ) // Suppress unused import errors @@ -14,166 +14,167 @@ var _ = fmt.Printf var _ = strconv.Itoa var _ = sync.Once{} + type CELParser struct { *antlr.BaseParser } -var celParserStaticData struct { - once sync.Once - serializedATN []int32 - literalNames []string - symbolicNames []string - ruleNames []string - predictionContextCache *antlr.PredictionContextCache - atn *antlr.ATN - decisionToDFA []*antlr.DFA +var CELParserStaticData struct { + once sync.Once + serializedATN []int32 + LiteralNames []string + SymbolicNames []string + RuleNames []string + PredictionContextCache *antlr.PredictionContextCache + atn *antlr.ATN + decisionToDFA []*antlr.DFA } func celParserInit() { - staticData := &celParserStaticData - staticData.literalNames = []string{ - "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", - "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", - "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", - } - staticData.symbolicNames = []string{ - "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", - "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", - "STRING", "BYTES", "IDENTIFIER", - } - staticData.ruleNames = []string{ - "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc", - "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList", - "optField", "mapInitializerList", "optExpr", "literal", - } - staticData.predictionContextCache = antlr.NewPredictionContextCache() - staticData.serializedATN = []int32{ - 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, - 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, - 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, - 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, - 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, - 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, - 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, - 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, - 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, - 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, - 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, - 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, - 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, - 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 
1, 8, 1, 8, 1, 8, 1, 8, 3, 8, - 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, - 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, - 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, - 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, - 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, - 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, - 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, - 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, - 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, - 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, - 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, - 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, - 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, - 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, - 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, - 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, - 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, - 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, - 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, - 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, - 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, - 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, - 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, - 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, - 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, - 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, - 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, - 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, - 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, - 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, - 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, - 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, - 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, - 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, - 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, - 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, - 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, - 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, - 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, - 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, - 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, - 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, - 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, - 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, - 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, - 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, - 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, - 122, 123, 5, 11, 0, 
0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, - 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, - 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, - 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, - 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, - 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, - 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, - 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, - 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, - 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, - 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, - 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, - 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, - 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, - 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, - 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, - 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, - 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, - 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, - 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, - 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, - 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, - 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, - 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, - 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, - 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, - 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, - 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, - 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, - 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, - 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, - 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, - 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, - 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, - 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, - 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, - 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219, - 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, - 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, - 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, - 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, - 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, - 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, - 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, - 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, - 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, - 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, - 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 
0, - 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, - 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, - 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, - 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, - 240, 248, - } - deserializer := antlr.NewATNDeserializer(nil) - staticData.atn = deserializer.Deserialize(staticData.serializedATN) - atn := staticData.atn - staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) - decisionToDFA := staticData.decisionToDFA - for index, state := range atn.DecisionToState { - decisionToDFA[index] = antlr.NewDFA(state, index) - } + staticData := &CELParserStaticData + staticData.LiteralNames = []string{ + "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", + "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", + "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", + } + staticData.SymbolicNames = []string{ + "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", + "STRING", "BYTES", "IDENTIFIER", + } + staticData.RuleNames = []string{ + "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc", + "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList", + "optField", "mapInitializerList", "optExpr", "literal", + } + staticData.PredictionContextCache = antlr.NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, + 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, + 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, + 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, + 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, + 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, + 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, + 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, + 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, + 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, + 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, + 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, + 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, + 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, + 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, + 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, + 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, + 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, + 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, + 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, + 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, + 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, + 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, + 231, 8, 14, 1, 14, 1, 14, 1, 
15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, + 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, + 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, + 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, + 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, + 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, + 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, + 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, + 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, + 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, + 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, + 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, + 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, + 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, + 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, + 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, + 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, + 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, + 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, + 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, + 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, + 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, + 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, + 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, + 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, + 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, + 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, + 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, + 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, + 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, + 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, + 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, + 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, + 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, + 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, + 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, + 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, + 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, + 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, + 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, + 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, + 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, + 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, + 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, + 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, + 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, + 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, + 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, + 1, 
0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, + 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, + 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, + 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, + 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, + 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, + 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, + 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, + 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, + 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, + 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, + 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, + 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, + 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, + 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, + 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, + 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, + 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, + 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, + 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, + 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, + 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, + 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, + 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, + 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, + 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, + 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219, + 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, + 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, + 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, + 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, + 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, + 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, + 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, + 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, + 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, + 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, + 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, + 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, + 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, + 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, + 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, + 240, 248, +} + deserializer := antlr.NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(state, index) + } } // CELParserInit initializes any static 
state used to implement CELParser. By default the @@ -181,8 +182,8 @@ func celParserInit() { // NewCELParser(). You can call this function if you wish to initialize the static state ahead // of time. func CELParserInit() { - staticData := &celParserStaticData - staticData.once.Do(celParserInit) + staticData := &CELParserStaticData + staticData.once.Do(celParserInit) } // NewCELParser produces a new parser instance for the optional input antlr.TokenStream. @@ -190,75 +191,76 @@ func NewCELParser(input antlr.TokenStream) *CELParser { CELParserInit() this := new(CELParser) this.BaseParser = antlr.NewBaseParser(input) - staticData := &celParserStaticData - this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) - this.RuleNames = staticData.ruleNames - this.LiteralNames = staticData.literalNames - this.SymbolicNames = staticData.symbolicNames + staticData := &CELParserStaticData + this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache) + this.RuleNames = staticData.RuleNames + this.LiteralNames = staticData.LiteralNames + this.SymbolicNames = staticData.SymbolicNames this.GrammarFileName = "CEL.g4" return this } + // CELParser tokens. const ( - CELParserEOF = antlr.TokenEOF - CELParserEQUALS = 1 - CELParserNOT_EQUALS = 2 - CELParserIN = 3 - CELParserLESS = 4 - CELParserLESS_EQUALS = 5 + CELParserEOF = antlr.TokenEOF + CELParserEQUALS = 1 + CELParserNOT_EQUALS = 2 + CELParserIN = 3 + CELParserLESS = 4 + CELParserLESS_EQUALS = 5 CELParserGREATER_EQUALS = 6 - CELParserGREATER = 7 - CELParserLOGICAL_AND = 8 - CELParserLOGICAL_OR = 9 - CELParserLBRACKET = 10 - CELParserRPRACKET = 11 - CELParserLBRACE = 12 - CELParserRBRACE = 13 - CELParserLPAREN = 14 - CELParserRPAREN = 15 - CELParserDOT = 16 - CELParserCOMMA = 17 - CELParserMINUS = 18 - CELParserEXCLAM = 19 - CELParserQUESTIONMARK = 20 - CELParserCOLON = 21 - CELParserPLUS = 22 - CELParserSTAR = 23 - CELParserSLASH = 24 - CELParserPERCENT = 25 - CELParserCEL_TRUE = 26 - CELParserCEL_FALSE = 27 - CELParserNUL = 28 - CELParserWHITESPACE = 29 - CELParserCOMMENT = 30 - CELParserNUM_FLOAT = 31 - CELParserNUM_INT = 32 - CELParserNUM_UINT = 33 - CELParserSTRING = 34 - CELParserBYTES = 35 - CELParserIDENTIFIER = 36 + CELParserGREATER = 7 + CELParserLOGICAL_AND = 8 + CELParserLOGICAL_OR = 9 + CELParserLBRACKET = 10 + CELParserRPRACKET = 11 + CELParserLBRACE = 12 + CELParserRBRACE = 13 + CELParserLPAREN = 14 + CELParserRPAREN = 15 + CELParserDOT = 16 + CELParserCOMMA = 17 + CELParserMINUS = 18 + CELParserEXCLAM = 19 + CELParserQUESTIONMARK = 20 + CELParserCOLON = 21 + CELParserPLUS = 22 + CELParserSTAR = 23 + CELParserSLASH = 24 + CELParserPERCENT = 25 + CELParserCEL_TRUE = 26 + CELParserCEL_FALSE = 27 + CELParserNUL = 28 + CELParserWHITESPACE = 29 + CELParserCOMMENT = 30 + CELParserNUM_FLOAT = 31 + CELParserNUM_INT = 32 + CELParserNUM_UINT = 33 + CELParserSTRING = 34 + CELParserBYTES = 35 + CELParserIDENTIFIER = 36 ) // CELParser rules. 
const ( - CELParserRULE_start = 0 - CELParserRULE_expr = 1 - CELParserRULE_conditionalOr = 2 - CELParserRULE_conditionalAnd = 3 - CELParserRULE_relation = 4 - CELParserRULE_calc = 5 - CELParserRULE_unary = 6 - CELParserRULE_member = 7 - CELParserRULE_primary = 8 - CELParserRULE_exprList = 9 - CELParserRULE_listInit = 10 + CELParserRULE_start = 0 + CELParserRULE_expr = 1 + CELParserRULE_conditionalOr = 2 + CELParserRULE_conditionalAnd = 3 + CELParserRULE_relation = 4 + CELParserRULE_calc = 5 + CELParserRULE_unary = 6 + CELParserRULE_member = 7 + CELParserRULE_primary = 8 + CELParserRULE_exprList = 9 + CELParserRULE_listInit = 10 CELParserRULE_fieldInitializerList = 11 - CELParserRULE_optField = 12 - CELParserRULE_mapInitializerList = 13 - CELParserRULE_optExpr = 14 - CELParserRULE_literal = 15 + CELParserRULE_optField = 12 + CELParserRULE_mapInitializerList = 13 + CELParserRULE_optExpr = 14 + CELParserRULE_literal = 15 ) // IStartContext is an interface to support dynamic dispatch. @@ -271,9 +273,11 @@ type IStartContext interface { // GetE returns the e rule contexts. GetE() IExprContext + // SetE sets the e rule contexts. SetE(IExprContext) + // Getter signatures EOF() antlr.TerminalNode Expr() IExprContext @@ -283,24 +287,29 @@ type IStartContext interface { } type StartContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - e IExprContext + e IExprContext } func NewEmptyStartContext() *StartContext { var p = new(StartContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_start return p } +func InitEmptyStartContext(p *StartContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_start +} + func (*StartContext) IsStartContext() {} func NewStartContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *StartContext { var p = new(StartContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_start @@ -312,17 +321,19 @@ func (s *StartContext) GetParser() antlr.Parser { return s.parser } func (s *StartContext) GetE() IExprContext { return s.e } + func (s *StartContext) SetE(v IExprContext) { s.e = v } + func (s *StartContext) EOF() antlr.TerminalNode { return s.GetToken(CELParserEOF, 0) } func (s *StartContext) Expr() IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -342,6 +353,7 @@ func (s *StartContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *StartContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterStart(s) @@ -364,45 +376,46 @@ func (s *StartContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) Start() (localctx IStartContext) { - this := p - _ = this - localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 0, CELParserRULE_start) - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - 
p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() +func (p *CELParser) Start_() (localctx IStartContext) { + localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 0, CELParserRULE_start) p.EnterOuterAlt(localctx, 1) { p.SetState(32) var _x = p.Expr() + localctx.(*StartContext).e = _x } { p.SetState(33) p.Match(CELParserEOF) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IExprContext is an interface to support dynamic dispatch. type IExprContext interface { antlr.ParserRuleContext @@ -411,10 +424,12 @@ type IExprContext interface { GetParser() antlr.Parser // GetOp returns the op token. - GetOp() antlr.Token + GetOp() antlr.Token + // SetOp sets the op token. - SetOp(antlr.Token) + SetOp(antlr.Token) + // GetE returns the e rule contexts. GetE() IConditionalOrContext @@ -425,6 +440,7 @@ type IExprContext interface { // GetE2 returns the e2 rule contexts. GetE2() IExprContext + // SetE sets the e rule contexts. SetE(IConditionalOrContext) @@ -434,6 +450,7 @@ type IExprContext interface { // SetE2 sets the e2 rule contexts. SetE2(IExprContext) + // Getter signatures AllConditionalOr() []IConditionalOrContext ConditionalOr(i int) IConditionalOrContext @@ -446,27 +463,32 @@ type IExprContext interface { } type ExprContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - e IConditionalOrContext - op antlr.Token - e1 IConditionalOrContext - e2 IExprContext + e IConditionalOrContext + op antlr.Token + e1 IConditionalOrContext + e2 IExprContext } func NewEmptyExprContext() *ExprContext { var p = new(ExprContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_expr return p } +func InitEmptyExprContext(p *ExprContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_expr +} + func (*ExprContext) IsExprContext() {} func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext { var p = new(ExprContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_expr @@ -478,20 +500,24 @@ func (s *ExprContext) GetParser() antlr.Parser { return s.parser } func (s *ExprContext) GetOp() antlr.Token { return s.op } + func (s *ExprContext) SetOp(v antlr.Token) { s.op = v } + func (s *ExprContext) GetE() IConditionalOrContext { return s.e } func (s *ExprContext) GetE1() IConditionalOrContext { return s.e1 } func (s *ExprContext) GetE2() IExprContext { return s.e2 } + func (s *ExprContext) SetE(v IConditionalOrContext) { s.e = v } func (s *ExprContext) SetE1(v IConditionalOrContext) { s.e1 = v } func (s *ExprContext) SetE2(v IExprContext) { s.e2 = v } + func (s *ExprContext) AllConditionalOr() []IConditionalOrContext { children := s.GetChildren() len := 0 @@ -514,12 +540,12 @@ func (s *ExprContext) AllConditionalOr() []IConditionalOrContext { } func (s *ExprContext) ConditionalOr(i int) 
IConditionalOrContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IConditionalOrContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -542,10 +568,10 @@ func (s *ExprContext) QUESTIONMARK() antlr.TerminalNode { } func (s *ExprContext) Expr() IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -565,6 +591,7 @@ func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) s return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterExpr(s) @@ -587,42 +614,31 @@ func (s *ExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) Expr() (localctx IExprContext) { - this := p - _ = this + + +func (p *CELParser) Expr() (localctx IExprContext) { localctx = NewExprContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 2, CELParserRULE_expr) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) { p.SetState(35) var _x = p.ConditionalOr() + localctx.(*ExprContext).e = _x } p.SetState(41) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK { { p.SetState(36) @@ -630,31 +646,54 @@ func (p *CELParser) Expr() (localctx IExprContext) { var _m = p.Match(CELParserQUESTIONMARK) localctx.(*ExprContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(37) var _x = p.ConditionalOr() + localctx.(*ExprContext).e1 = _x } { p.SetState(38) p.Match(CELParserCOLON) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(39) var _x = p.Expr() + localctx.(*ExprContext).e2 = _x } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IConditionalOrContext is an interface to support dynamic dispatch. type IConditionalOrContext interface { antlr.ParserRuleContext @@ -663,34 +702,42 @@ type IConditionalOrContext interface { GetParser() antlr.Parser // GetS9 returns the s9 token. - GetS9() antlr.Token + GetS9() antlr.Token + // SetS9 sets the s9 token. - SetS9(antlr.Token) + SetS9(antlr.Token) + // GetOps returns the ops token list. GetOps() []antlr.Token + // SetOps sets the ops token list. SetOps([]antlr.Token) + // GetE returns the e rule contexts. GetE() IConditionalAndContext // Get_conditionalAnd returns the _conditionalAnd rule contexts. Get_conditionalAnd() IConditionalAndContext + // SetE sets the e rule contexts. SetE(IConditionalAndContext) // Set_conditionalAnd sets the _conditionalAnd rule contexts. Set_conditionalAnd(IConditionalAndContext) + // GetE1 returns the e1 rule context list. GetE1() []IConditionalAndContext + // SetE1 sets the e1 rule context list. 
- SetE1([]IConditionalAndContext) + SetE1([]IConditionalAndContext) + // Getter signatures AllConditionalAnd() []IConditionalAndContext @@ -703,28 +750,33 @@ type IConditionalOrContext interface { } type ConditionalOrContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - e IConditionalAndContext - s9 antlr.Token - ops []antlr.Token - _conditionalAnd IConditionalAndContext - e1 []IConditionalAndContext + antlr.BaseParserRuleContext + parser antlr.Parser + e IConditionalAndContext + s9 antlr.Token + ops []antlr.Token + _conditionalAnd IConditionalAndContext + e1 []IConditionalAndContext } func NewEmptyConditionalOrContext() *ConditionalOrContext { var p = new(ConditionalOrContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_conditionalOr return p } +func InitEmptyConditionalOrContext(p *ConditionalOrContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalOr +} + func (*ConditionalOrContext) IsConditionalOrContext() {} func NewConditionalOrContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalOrContext { var p = new(ConditionalOrContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_conditionalOr @@ -736,24 +788,32 @@ func (s *ConditionalOrContext) GetParser() antlr.Parser { return s.parser } func (s *ConditionalOrContext) GetS9() antlr.Token { return s.s9 } + func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v } + func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops } + func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v } + func (s *ConditionalOrContext) GetE() IConditionalAndContext { return s.e } func (s *ConditionalOrContext) Get_conditionalAnd() IConditionalAndContext { return s._conditionalAnd } + func (s *ConditionalOrContext) SetE(v IConditionalAndContext) { s.e = v } func (s *ConditionalOrContext) Set_conditionalAnd(v IConditionalAndContext) { s._conditionalAnd = v } + func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 } + func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v } + func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext { children := s.GetChildren() len := 0 @@ -776,12 +836,12 @@ func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext { } func (s *ConditionalOrContext) ConditionalAnd(i int) IConditionalAndContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IConditionalAndContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -811,6 +871,7 @@ func (s *ConditionalOrContext) ToStringTree(ruleNames []string, recog antlr.Reco return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *ConditionalOrContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterConditionalOr(s) @@ -833,42 +894,31 @@ func (s *ConditionalOrContext) Accept(visitor antlr.ParseTreeVisitor) interface{ } } -func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) { - this := p - _ = this + + +func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) { localctx = 
NewConditionalOrContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 4, CELParserRULE_conditionalOr) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) { p.SetState(43) var _x = p.ConditionalAnd() + localctx.(*ConditionalOrContext).e = _x } p.SetState(48) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + for _la == CELParserLOGICAL_OR { { p.SetState(44) @@ -876,6 +926,10 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) { var _m = p.Match(CELParserLOGICAL_OR) localctx.(*ConditionalOrContext).s9 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9) { @@ -883,18 +937,36 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) { var _x = p.ConditionalAnd() + localctx.(*ConditionalOrContext)._conditionalAnd = _x } localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd) + p.SetState(50) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IConditionalAndContext is an interface to support dynamic dispatch. type IConditionalAndContext interface { antlr.ParserRuleContext @@ -903,34 +975,42 @@ type IConditionalAndContext interface { GetParser() antlr.Parser // GetS8 returns the s8 token. - GetS8() antlr.Token + GetS8() antlr.Token + // SetS8 sets the s8 token. - SetS8(antlr.Token) + SetS8(antlr.Token) + // GetOps returns the ops token list. GetOps() []antlr.Token + // SetOps sets the ops token list. SetOps([]antlr.Token) + // GetE returns the e rule contexts. GetE() IRelationContext // Get_relation returns the _relation rule contexts. Get_relation() IRelationContext + // SetE sets the e rule contexts. SetE(IRelationContext) // Set_relation sets the _relation rule contexts. Set_relation(IRelationContext) + // GetE1 returns the e1 rule context list. GetE1() []IRelationContext + // SetE1 sets the e1 rule context list. 
- SetE1([]IRelationContext) + SetE1([]IRelationContext) + // Getter signatures AllRelation() []IRelationContext @@ -943,28 +1023,33 @@ type IConditionalAndContext interface { } type ConditionalAndContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - e IRelationContext - s8 antlr.Token - ops []antlr.Token - _relation IRelationContext - e1 []IRelationContext + antlr.BaseParserRuleContext + parser antlr.Parser + e IRelationContext + s8 antlr.Token + ops []antlr.Token + _relation IRelationContext + e1 []IRelationContext } func NewEmptyConditionalAndContext() *ConditionalAndContext { var p = new(ConditionalAndContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_conditionalAnd return p } +func InitEmptyConditionalAndContext(p *ConditionalAndContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalAnd +} + func (*ConditionalAndContext) IsConditionalAndContext() {} func NewConditionalAndContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalAndContext { var p = new(ConditionalAndContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_conditionalAnd @@ -976,24 +1061,32 @@ func (s *ConditionalAndContext) GetParser() antlr.Parser { return s.parser } func (s *ConditionalAndContext) GetS8() antlr.Token { return s.s8 } + func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v } + func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops } + func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v } + func (s *ConditionalAndContext) GetE() IRelationContext { return s.e } func (s *ConditionalAndContext) Get_relation() IRelationContext { return s._relation } + func (s *ConditionalAndContext) SetE(v IRelationContext) { s.e = v } func (s *ConditionalAndContext) Set_relation(v IRelationContext) { s._relation = v } + func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 } + func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v } + func (s *ConditionalAndContext) AllRelation() []IRelationContext { children := s.GetChildren() len := 0 @@ -1016,12 +1109,12 @@ func (s *ConditionalAndContext) AllRelation() []IRelationContext { } func (s *ConditionalAndContext) Relation(i int) IRelationContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IRelationContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -1051,6 +1144,7 @@ func (s *ConditionalAndContext) ToStringTree(ruleNames []string, recog antlr.Rec return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *ConditionalAndContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterConditionalAnd(s) @@ -1073,30 +1167,14 @@ func (s *ConditionalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface } } -func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { - this := p - _ = this + + +func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { localctx = NewConditionalAndContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 6, CELParserRULE_conditionalAnd) var _la int - 
defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) { p.SetState(51) @@ -1107,8 +1185,12 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { } p.SetState(56) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + for _la == CELParserLOGICAL_AND { { p.SetState(52) @@ -1116,6 +1198,10 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { var _m = p.Match(CELParserLOGICAL_AND) localctx.(*ConditionalAndContext).s8 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8) { @@ -1127,14 +1213,31 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { } localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation) + p.SetState(58) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IRelationContext is an interface to support dynamic dispatch. type IRelationContext interface { antlr.ParserRuleContext @@ -1143,10 +1246,12 @@ type IRelationContext interface { GetParser() antlr.Parser // GetOp returns the op token. - GetOp() antlr.Token + GetOp() antlr.Token + // SetOp sets the op token. 
- SetOp(antlr.Token) + SetOp(antlr.Token) + // Getter signatures Calc() ICalcContext @@ -1165,24 +1270,29 @@ type IRelationContext interface { } type RelationContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - op antlr.Token + op antlr.Token } func NewEmptyRelationContext() *RelationContext { var p = new(RelationContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_relation return p } +func InitEmptyRelationContext(p *RelationContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_relation +} + func (*RelationContext) IsRelationContext() {} func NewRelationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *RelationContext { var p = new(RelationContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_relation @@ -1194,13 +1304,15 @@ func (s *RelationContext) GetParser() antlr.Parser { return s.parser } func (s *RelationContext) GetOp() antlr.Token { return s.op } + func (s *RelationContext) SetOp(v antlr.Token) { s.op = v } + func (s *RelationContext) Calc() ICalcContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(ICalcContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -1234,12 +1346,12 @@ func (s *RelationContext) AllRelation() []IRelationContext { } func (s *RelationContext) Relation(i int) IRelationContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IRelationContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -1289,6 +1401,7 @@ func (s *RelationContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *RelationContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterRelation(s) @@ -1311,15 +1424,17 @@ func (s *RelationContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + + + + func (p *CELParser) Relation() (localctx IRelationContext) { return p.relation(0) } func (p *CELParser) relation(_p int) (localctx IRelationContext) { - this := p - _ = this - var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + _parentState := p.GetState() localctx = NewRelationContext(p, p.GetParserRuleContext(), _parentState) var _prevctx IRelationContext = localctx @@ -1328,22 +1443,6 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) { p.EnterRecursionRule(localctx, 8, CELParserRULE_relation, _p) var _la int - defer func() { - p.UnrollRecursionContexts(_parentctx) - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - var _alt int p.EnterOuterAlt(localctx, 1) @@ -1355,8 +1454,13 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) { p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) p.SetState(67) p.GetErrorHandler().Sync(p) - _alt = 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { if p.GetParseListeners() != nil { @@ -1368,7 +1472,8 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) { p.SetState(62) if !(p.Precpred(p.GetParserRuleContext(), 1)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit } { p.SetState(63) @@ -1379,7 +1484,7 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) { _la = p.GetTokenStream().LA(1) - if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&254) != 0) { + if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 254) != 0)) { var _ri = p.GetErrorHandler().RecoverInline(p) localctx.(*RelationContext).op = _ri @@ -1393,15 +1498,35 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) { p.relation(2) } + } p.SetState(69) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // ICalcContext is an interface to support dynamic dispatch. type ICalcContext interface { antlr.ParserRuleContext @@ -1410,10 +1535,12 @@ type ICalcContext interface { GetParser() antlr.Parser // GetOp returns the op token. - GetOp() antlr.Token + GetOp() antlr.Token + // SetOp sets the op token. 
- SetOp(antlr.Token) + SetOp(antlr.Token) + // Getter signatures Unary() IUnaryContext @@ -1430,24 +1557,29 @@ type ICalcContext interface { } type CalcContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - op antlr.Token + op antlr.Token } func NewEmptyCalcContext() *CalcContext { var p = new(CalcContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_calc return p } +func InitEmptyCalcContext(p *CalcContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_calc +} + func (*CalcContext) IsCalcContext() {} func NewCalcContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *CalcContext { var p = new(CalcContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_calc @@ -1459,13 +1591,15 @@ func (s *CalcContext) GetParser() antlr.Parser { return s.parser } func (s *CalcContext) GetOp() antlr.Token { return s.op } + func (s *CalcContext) SetOp(v antlr.Token) { s.op = v } + func (s *CalcContext) Unary() IUnaryContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IUnaryContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -1499,12 +1633,12 @@ func (s *CalcContext) AllCalc() []ICalcContext { } func (s *CalcContext) Calc(i int) ICalcContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(ICalcContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -1546,6 +1680,7 @@ func (s *CalcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) s return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *CalcContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterCalc(s) @@ -1568,15 +1703,17 @@ func (s *CalcContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + + + + func (p *CELParser) Calc() (localctx ICalcContext) { return p.calc(0) } func (p *CELParser) calc(_p int) (localctx ICalcContext) { - this := p - _ = this - var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + _parentState := p.GetState() localctx = NewCalcContext(p, p.GetParserRuleContext(), _parentState) var _prevctx ICalcContext = localctx @@ -1585,22 +1722,6 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { p.EnterRecursionRule(localctx, 10, CELParserRULE_calc, _p) var _la int - defer func() { - p.UnrollRecursionContexts(_parentctx) - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - var _alt int p.EnterOuterAlt(localctx, 1) @@ -1612,8 +1733,13 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) p.SetState(81) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 5, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, 
p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { if p.GetParseListeners() != nil { @@ -1622,14 +1748,19 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { _prevctx = localctx p.SetState(79) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 4, p.GetParserRuleContext()) { + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 4, p.GetParserRuleContext()) { case 1: localctx = NewCalcContext(p, _parentctx, _parentState) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc) p.SetState(73) if !(p.Precpred(p.GetParserRuleContext(), 2)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + goto errorExit } { p.SetState(74) @@ -1640,7 +1771,7 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { _la = p.GetTokenStream().LA(1) - if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&58720256) != 0) { + if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 58720256) != 0)) { var _ri = p.GetErrorHandler().RecoverInline(p) localctx.(*CalcContext).op = _ri @@ -1654,13 +1785,15 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { p.calc(3) } + case 2: localctx = NewCalcContext(p, _parentctx, _parentState) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc) p.SetState(76) if !(p.Precpred(p.GetParserRuleContext(), 1)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit } { p.SetState(77) @@ -1685,17 +1818,38 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { p.calc(2) } + case antlr.ATNInvalidAltNumber: + goto errorExit } } p.SetState(83) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 5, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IUnaryContext is an interface to support dynamic dispatch. 
type IUnaryContext interface { antlr.ParserRuleContext @@ -1707,23 +1861,28 @@ type IUnaryContext interface { } type UnaryContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser } func NewEmptyUnaryContext() *UnaryContext { var p = new(UnaryContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_unary return p } +func InitEmptyUnaryContext(p *UnaryContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_unary +} + func (*UnaryContext) IsUnaryContext() {} func NewUnaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *UnaryContext { var p = new(UnaryContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_unary @@ -1733,8 +1892,8 @@ func NewUnaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invoki func (s *UnaryContext) GetParser() antlr.Parser { return s.parser } -func (s *UnaryContext) CopyFrom(ctx *UnaryContext) { - s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext) +func (s *UnaryContext) CopyAll(ctx *UnaryContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) } func (s *UnaryContext) GetRuleContext() antlr.RuleContext { @@ -1745,8 +1904,11 @@ func (s *UnaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) return antlr.TreesStringTree(s, ruleNames, recog) } + + + type LogicalNotContext struct { - *UnaryContext + UnaryContext s19 antlr.Token ops []antlr.Token } @@ -1754,19 +1916,23 @@ type LogicalNotContext struct { func NewLogicalNotContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *LogicalNotContext { var p = new(LogicalNotContext) - p.UnaryContext = NewEmptyUnaryContext() + InitEmptyUnaryContext(&p.UnaryContext) p.parser = parser - p.CopyFrom(ctx.(*UnaryContext)) + p.CopyAll(ctx.(*UnaryContext)) return p } + func (s *LogicalNotContext) GetS19() antlr.Token { return s.s19 } + func (s *LogicalNotContext) SetS19(v antlr.Token) { s.s19 = v } + func (s *LogicalNotContext) GetOps() []antlr.Token { return s.ops } + func (s *LogicalNotContext) SetOps(v []antlr.Token) { s.ops = v } func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext { @@ -1774,10 +1940,10 @@ func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext { } func (s *LogicalNotContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -1797,6 +1963,7 @@ func (s *LogicalNotContext) EXCLAM(i int) antlr.TerminalNode { return s.GetToken(CELParserEXCLAM, i) } + func (s *LogicalNotContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterLogicalNot(s) @@ -1819,16 +1986,17 @@ func (s *LogicalNotContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type MemberExprContext struct { - *UnaryContext + UnaryContext } func NewMemberExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberExprContext { var p = new(MemberExprContext) - p.UnaryContext = NewEmptyUnaryContext() + InitEmptyUnaryContext(&p.UnaryContext) p.parser = parser - p.CopyFrom(ctx.(*UnaryContext)) + p.CopyAll(ctx.(*UnaryContext)) return p } @@ -1838,10 
+2006,10 @@ func (s *MemberExprContext) GetRuleContext() antlr.RuleContext { } func (s *MemberExprContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -1853,6 +2021,7 @@ func (s *MemberExprContext) Member() IMemberContext { return t.(IMemberContext) } + func (s *MemberExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterMemberExpr(s) @@ -1875,8 +2044,9 @@ func (s *MemberExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type NegateContext struct { - *UnaryContext + UnaryContext s18 antlr.Token ops []antlr.Token } @@ -1884,19 +2054,23 @@ type NegateContext struct { func NewNegateContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NegateContext { var p = new(NegateContext) - p.UnaryContext = NewEmptyUnaryContext() + InitEmptyUnaryContext(&p.UnaryContext) p.parser = parser - p.CopyFrom(ctx.(*UnaryContext)) + p.CopyAll(ctx.(*UnaryContext)) return p } + func (s *NegateContext) GetS18() antlr.Token { return s.s18 } + func (s *NegateContext) SetS18(v antlr.Token) { s.s18 = v } + func (s *NegateContext) GetOps() []antlr.Token { return s.ops } + func (s *NegateContext) SetOps(v []antlr.Token) { s.ops = v } func (s *NegateContext) GetRuleContext() antlr.RuleContext { @@ -1904,10 +2078,10 @@ func (s *NegateContext) GetRuleContext() antlr.RuleContext { } func (s *NegateContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -1927,6 +2101,7 @@ func (s *NegateContext) MINUS(i int) antlr.TerminalNode { return s.GetToken(CELParserMINUS, i) } + func (s *NegateContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterNegate(s) @@ -1949,35 +2124,22 @@ func (s *NegateContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) Unary() (localctx IUnaryContext) { - this := p - _ = this + +func (p *CELParser) Unary() (localctx IUnaryContext) { localctx = NewUnaryContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 12, CELParserRULE_unary) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - var _alt int p.SetState(97) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 8, p.GetParserRuleContext()) { + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 8, p.GetParserRuleContext()) { case 1: localctx = NewMemberExprContext(p, localctx) p.EnterOuterAlt(localctx, 1) @@ -1986,13 +2148,18 @@ func (p *CELParser) Unary() (localctx IUnaryContext) { p.member(0) } + case 2: localctx = NewLogicalNotContext(p, localctx) p.EnterOuterAlt(localctx, 2) p.SetState(86) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + for ok := true; ok; ok = _la == CELParserEXCLAM { { p.SetState(85) @@ -2000,11 +2167,19 @@ func (p *CELParser) Unary() (localctx IUnaryContext) { var _m 
= p.Match(CELParserEXCLAM) localctx.(*LogicalNotContext).s19 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*LogicalNotContext).ops = append(localctx.(*LogicalNotContext).ops, localctx.(*LogicalNotContext).s19) + p.SetState(88) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) } { @@ -2012,42 +2187,71 @@ func (p *CELParser) Unary() (localctx IUnaryContext) { p.member(0) } + case 3: localctx = NewNegateContext(p, localctx) p.EnterOuterAlt(localctx, 3) p.SetState(92) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _alt = 1 for ok := true; ok; ok = _alt != 2 && _alt != antlr.ATNInvalidAltNumber { switch _alt { case 1: - { - p.SetState(91) + { + p.SetState(91) + + var _m = p.Match(CELParserMINUS) + + localctx.(*NegateContext).s18 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18) + - var _m = p.Match(CELParserMINUS) - localctx.(*NegateContext).s18 = _m - } - localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18) default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + p.SetError(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + goto errorExit } p.SetState(94) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 7, p.GetParserRuleContext()) + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 7, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } { p.SetState(96) p.member(0) } + case antlr.ATNInvalidAltNumber: + goto errorExit } + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IMemberContext is an interface to support dynamic dispatch. 
type IMemberContext interface { antlr.ParserRuleContext @@ -2059,23 +2263,28 @@ type IMemberContext interface { } type MemberContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser } func NewEmptyMemberContext() *MemberContext { var p = new(MemberContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_member return p } +func InitEmptyMemberContext(p *MemberContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_member +} + func (*MemberContext) IsMemberContext() {} func NewMemberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MemberContext { var p = new(MemberContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_member @@ -2085,8 +2294,8 @@ func NewMemberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invok func (s *MemberContext) GetParser() antlr.Parser { return s.parser } -func (s *MemberContext) CopyFrom(ctx *MemberContext) { - s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext) +func (s *MemberContext) CopyAll(ctx *MemberContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) } func (s *MemberContext) GetRuleContext() antlr.RuleContext { @@ -2097,38 +2306,46 @@ func (s *MemberContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) return antlr.TreesStringTree(s, ruleNames, recog) } + + + + type MemberCallContext struct { - *MemberContext - op antlr.Token - id antlr.Token + MemberContext + op antlr.Token + id antlr.Token open antlr.Token - args IExprListContext + args IExprListContext } func NewMemberCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberCallContext { var p = new(MemberCallContext) - p.MemberContext = NewEmptyMemberContext() + InitEmptyMemberContext(&p.MemberContext) p.parser = parser - p.CopyFrom(ctx.(*MemberContext)) + p.CopyAll(ctx.(*MemberContext)) return p } + func (s *MemberCallContext) GetOp() antlr.Token { return s.op } func (s *MemberCallContext) GetId() antlr.Token { return s.id } func (s *MemberCallContext) GetOpen() antlr.Token { return s.open } + func (s *MemberCallContext) SetOp(v antlr.Token) { s.op = v } func (s *MemberCallContext) SetId(v antlr.Token) { s.id = v } func (s *MemberCallContext) SetOpen(v antlr.Token) { s.open = v } + func (s *MemberCallContext) GetArgs() IExprListContext { return s.args } + func (s *MemberCallContext) SetArgs(v IExprListContext) { s.args = v } func (s *MemberCallContext) GetRuleContext() antlr.RuleContext { @@ -2136,10 +2353,10 @@ func (s *MemberCallContext) GetRuleContext() antlr.RuleContext { } func (s *MemberCallContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2168,10 +2385,10 @@ func (s *MemberCallContext) LPAREN() antlr.TerminalNode { } func (s *MemberCallContext) ExprList() IExprListContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprListContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2183,6 +2400,7 @@ func (s *MemberCallContext) ExprList() IExprListContext { return 
t.(IExprListContext) } + func (s *MemberCallContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterMemberCall(s) @@ -2205,29 +2423,32 @@ func (s *MemberCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type SelectContext struct { - *MemberContext - op antlr.Token + MemberContext + op antlr.Token opt antlr.Token - id antlr.Token + id antlr.Token } func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectContext { var p = new(SelectContext) - p.MemberContext = NewEmptyMemberContext() + InitEmptyMemberContext(&p.MemberContext) p.parser = parser - p.CopyFrom(ctx.(*MemberContext)) + p.CopyAll(ctx.(*MemberContext)) return p } + func (s *SelectContext) GetOp() antlr.Token { return s.op } func (s *SelectContext) GetOpt() antlr.Token { return s.opt } func (s *SelectContext) GetId() antlr.Token { return s.id } + func (s *SelectContext) SetOp(v antlr.Token) { s.op = v } func (s *SelectContext) SetOpt(v antlr.Token) { s.opt = v } @@ -2239,10 +2460,10 @@ func (s *SelectContext) GetRuleContext() antlr.RuleContext { } func (s *SelectContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2266,6 +2487,7 @@ func (s *SelectContext) QUESTIONMARK() antlr.TerminalNode { return s.GetToken(CELParserQUESTIONMARK, 0) } + func (s *SelectContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterSelect(s) @@ -2288,16 +2510,17 @@ func (s *SelectContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type PrimaryExprContext struct { - *MemberContext + MemberContext } func NewPrimaryExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *PrimaryExprContext { var p = new(PrimaryExprContext) - p.MemberContext = NewEmptyMemberContext() + InitEmptyMemberContext(&p.MemberContext) p.parser = parser - p.CopyFrom(ctx.(*MemberContext)) + p.CopyAll(ctx.(*MemberContext)) return p } @@ -2307,10 +2530,10 @@ func (s *PrimaryExprContext) GetRuleContext() antlr.RuleContext { } func (s *PrimaryExprContext) Primary() IPrimaryContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IPrimaryContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2322,6 +2545,7 @@ func (s *PrimaryExprContext) Primary() IPrimaryContext { return t.(IPrimaryContext) } + func (s *PrimaryExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterPrimaryExpr(s) @@ -2344,33 +2568,38 @@ func (s *PrimaryExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} } } + type IndexContext struct { - *MemberContext - op antlr.Token - opt antlr.Token - index IExprContext + MemberContext + op antlr.Token + opt antlr.Token + index IExprContext } func NewIndexContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IndexContext { var p = new(IndexContext) - p.MemberContext = NewEmptyMemberContext() + InitEmptyMemberContext(&p.MemberContext) p.parser = parser - p.CopyFrom(ctx.(*MemberContext)) + p.CopyAll(ctx.(*MemberContext)) return p } + func (s *IndexContext) GetOp() antlr.Token { return s.op } func (s *IndexContext) GetOpt() antlr.Token { return s.opt } + func (s *IndexContext) SetOp(v antlr.Token) { s.op = v } func (s *IndexContext) SetOpt(v 
antlr.Token) { s.opt = v } + func (s *IndexContext) GetIndex() IExprContext { return s.index } + func (s *IndexContext) SetIndex(v IExprContext) { s.index = v } func (s *IndexContext) GetRuleContext() antlr.RuleContext { @@ -2378,10 +2607,10 @@ func (s *IndexContext) GetRuleContext() antlr.RuleContext { } func (s *IndexContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2402,10 +2631,10 @@ func (s *IndexContext) LBRACKET() antlr.TerminalNode { } func (s *IndexContext) Expr() IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2421,6 +2650,7 @@ func (s *IndexContext) QUESTIONMARK() antlr.TerminalNode { return s.GetToken(CELParserQUESTIONMARK, 0) } + func (s *IndexContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterIndex(s) @@ -2443,15 +2673,15 @@ func (s *IndexContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + + func (p *CELParser) Member() (localctx IMemberContext) { return p.member(0) } func (p *CELParser) member(_p int) (localctx IMemberContext) { - this := p - _ = this - var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + _parentState := p.GetState() localctx = NewMemberContext(p, p.GetParserRuleContext(), _parentState) var _prevctx IMemberContext = localctx @@ -2460,22 +2690,6 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { p.EnterRecursionRule(localctx, 14, CELParserRULE_member, _p) var _la int - defer func() { - p.UnrollRecursionContexts(_parentctx) - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - var _alt int p.EnterOuterAlt(localctx, 1) @@ -2491,8 +2705,13 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) p.SetState(126) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 13, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { if p.GetParseListeners() != nil { @@ -2501,14 +2720,19 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { _prevctx = localctx p.SetState(124) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 12, p.GetParserRuleContext()) { + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 12, p.GetParserRuleContext()) { case 1: localctx = NewSelectContext(p, NewMemberContext(p, _parentctx, _parentState)) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) p.SetState(102) if !(p.Precpred(p.GetParserRuleContext(), 3)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", "")) + goto 
errorExit } { p.SetState(103) @@ -2516,11 +2740,19 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserDOT) localctx.(*SelectContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(105) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK { { p.SetState(104) @@ -2528,6 +2760,10 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserQUESTIONMARK) localctx.(*SelectContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -2537,15 +2773,21 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserIDENTIFIER) localctx.(*SelectContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 2: localctx = NewMemberCallContext(p, NewMemberContext(p, _parentctx, _parentState)) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) p.SetState(108) if !(p.Precpred(p.GetParserRuleContext(), 2)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + goto errorExit } { p.SetState(109) @@ -2553,6 +2795,10 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserDOT) localctx.(*MemberCallContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(110) @@ -2560,6 +2806,10 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserIDENTIFIER) localctx.(*MemberCallContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(111) @@ -2567,17 +2817,26 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserLPAREN) localctx.(*MemberCallContext).open = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(113) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) - if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135762105344) != 0 { + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) { { p.SetState(112) var _x = p.ExprList() + localctx.(*MemberCallContext).args = _x } @@ -2585,15 +2844,21 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { { p.SetState(115) p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 3: localctx = NewIndexContext(p, NewMemberContext(p, _parentctx, _parentState)) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) p.SetState(116) if !(p.Precpred(p.GetParserRuleContext(), 1)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit } { p.SetState(117) @@ -2601,11 +2866,19 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserLBRACKET) localctx.(*IndexContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(119) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK { { p.SetState(118) @@ -2613,6 +2886,10 
@@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserQUESTIONMARK) localctx.(*IndexContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -2621,24 +2898,50 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _x = p.Expr() + localctx.(*IndexContext).index = _x } { p.SetState(122) p.Match(CELParserRPRACKET) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case antlr.ATNInvalidAltNumber: + goto errorExit } } p.SetState(128) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 13, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IPrimaryContext is an interface to support dynamic dispatch. type IPrimaryContext interface { antlr.ParserRuleContext @@ -2650,23 +2953,28 @@ type IPrimaryContext interface { } type PrimaryContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser } func NewEmptyPrimaryContext() *PrimaryContext { var p = new(PrimaryContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_primary return p } +func InitEmptyPrimaryContext(p *PrimaryContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_primary +} + func (*PrimaryContext) IsPrimaryContext() {} func NewPrimaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *PrimaryContext { var p = new(PrimaryContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_primary @@ -2676,8 +2984,8 @@ func NewPrimaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invo func (s *PrimaryContext) GetParser() antlr.Parser { return s.parser } -func (s *PrimaryContext) CopyFrom(ctx *PrimaryContext) { - s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext) +func (s *PrimaryContext) CopyAll(ctx *PrimaryContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) } func (s *PrimaryContext) GetRuleContext() antlr.RuleContext { @@ -2688,28 +2996,35 @@ func (s *PrimaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer return antlr.TreesStringTree(s, ruleNames, recog) } + + + type CreateListContext struct { - *PrimaryContext - op antlr.Token - elems IListInitContext + PrimaryContext + op antlr.Token + elems IListInitContext } func NewCreateListContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateListContext { var p = new(CreateListContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } + func (s *CreateListContext) GetOp() antlr.Token { return s.op } + func (s *CreateListContext) SetOp(v antlr.Token) { s.op = v } + func (s 
*CreateListContext) GetElems() IListInitContext { return s.elems } + func (s *CreateListContext) SetElems(v IListInitContext) { s.elems = v } func (s *CreateListContext) GetRuleContext() antlr.RuleContext { @@ -2729,10 +3044,10 @@ func (s *CreateListContext) COMMA() antlr.TerminalNode { } func (s *CreateListContext) ListInit() IListInitContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IListInitContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2744,6 +3059,7 @@ func (s *CreateListContext) ListInit() IListInitContext { return t.(IListInitContext) } + func (s *CreateListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterCreateList(s) @@ -2766,28 +3082,33 @@ func (s *CreateListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type CreateStructContext struct { - *PrimaryContext - op antlr.Token - entries IMapInitializerListContext + PrimaryContext + op antlr.Token + entries IMapInitializerListContext } func NewCreateStructContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateStructContext { var p = new(CreateStructContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } + func (s *CreateStructContext) GetOp() antlr.Token { return s.op } + func (s *CreateStructContext) SetOp(v antlr.Token) { s.op = v } + func (s *CreateStructContext) GetEntries() IMapInitializerListContext { return s.entries } + func (s *CreateStructContext) SetEntries(v IMapInitializerListContext) { s.entries = v } func (s *CreateStructContext) GetRuleContext() antlr.RuleContext { @@ -2807,10 +3128,10 @@ func (s *CreateStructContext) COMMA() antlr.TerminalNode { } func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMapInitializerListContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2822,6 +3143,7 @@ func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext { return t.(IMapInitializerListContext) } + func (s *CreateStructContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterCreateStruct(s) @@ -2844,16 +3166,17 @@ func (s *CreateStructContext) Accept(visitor antlr.ParseTreeVisitor) interface{} } } + type ConstantLiteralContext struct { - *PrimaryContext + PrimaryContext } func NewConstantLiteralContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ConstantLiteralContext { var p = new(ConstantLiteralContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } @@ -2863,10 +3186,10 @@ func (s *ConstantLiteralContext) GetRuleContext() antlr.RuleContext { } func (s *ConstantLiteralContext) Literal() ILiteralContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(ILiteralContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2878,6 +3201,7 @@ func (s *ConstantLiteralContext) Literal() ILiteralContext { return t.(ILiteralContext) } + func (s *ConstantLiteralContext) EnterRule(listener 
antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterConstantLiteral(s) @@ -2900,23 +3224,26 @@ func (s *ConstantLiteralContext) Accept(visitor antlr.ParseTreeVisitor) interfac } } + type NestedContext struct { - *PrimaryContext - e IExprContext + PrimaryContext + e IExprContext } func NewNestedContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NestedContext { var p = new(NestedContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } + func (s *NestedContext) GetE() IExprContext { return s.e } + func (s *NestedContext) SetE(v IExprContext) { s.e = v } func (s *NestedContext) GetRuleContext() antlr.RuleContext { @@ -2932,10 +3259,10 @@ func (s *NestedContext) RPAREN() antlr.TerminalNode { } func (s *NestedContext) Expr() IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2947,6 +3274,7 @@ func (s *NestedContext) Expr() IExprContext { return t.(IExprContext) } + func (s *NestedContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterNested(s) @@ -2969,27 +3297,29 @@ func (s *NestedContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type CreateMessageContext struct { - *PrimaryContext - leadingDot antlr.Token + PrimaryContext + leadingDot antlr.Token _IDENTIFIER antlr.Token - ids []antlr.Token - s16 antlr.Token - ops []antlr.Token - op antlr.Token - entries IFieldInitializerListContext + ids []antlr.Token + s16 antlr.Token + ops []antlr.Token + op antlr.Token + entries IFieldInitializerListContext } func NewCreateMessageContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateMessageContext { var p = new(CreateMessageContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } + func (s *CreateMessageContext) GetLeadingDot() antlr.Token { return s.leadingDot } func (s *CreateMessageContext) Get_IDENTIFIER() antlr.Token { return s._IDENTIFIER } @@ -2998,6 +3328,7 @@ func (s *CreateMessageContext) GetS16() antlr.Token { return s.s16 } func (s *CreateMessageContext) GetOp() antlr.Token { return s.op } + func (s *CreateMessageContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v } func (s *CreateMessageContext) Set_IDENTIFIER(v antlr.Token) { s._IDENTIFIER = v } @@ -3006,16 +3337,20 @@ func (s *CreateMessageContext) SetS16(v antlr.Token) { s.s16 = v } func (s *CreateMessageContext) SetOp(v antlr.Token) { s.op = v } + func (s *CreateMessageContext) GetIds() []antlr.Token { return s.ids } func (s *CreateMessageContext) GetOps() []antlr.Token { return s.ops } + func (s *CreateMessageContext) SetIds(v []antlr.Token) { s.ids = v } func (s *CreateMessageContext) SetOps(v []antlr.Token) { s.ops = v } + func (s *CreateMessageContext) GetEntries() IFieldInitializerListContext { return s.entries } + func (s *CreateMessageContext) SetEntries(v IFieldInitializerListContext) { s.entries = v } func (s *CreateMessageContext) GetRuleContext() antlr.RuleContext { @@ -3051,10 +3386,10 @@ func (s *CreateMessageContext) DOT(i int) antlr.TerminalNode { } func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListContext { - var 
t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IFieldInitializerListContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -3066,6 +3401,7 @@ func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListConte return t.(IFieldInitializerListContext) } + func (s *CreateMessageContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterCreateMessage(s) @@ -3088,38 +3424,43 @@ func (s *CreateMessageContext) Accept(visitor antlr.ParseTreeVisitor) interface{ } } + type IdentOrGlobalCallContext struct { - *PrimaryContext + PrimaryContext leadingDot antlr.Token - id antlr.Token - op antlr.Token - args IExprListContext + id antlr.Token + op antlr.Token + args IExprListContext } func NewIdentOrGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IdentOrGlobalCallContext { var p = new(IdentOrGlobalCallContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } + func (s *IdentOrGlobalCallContext) GetLeadingDot() antlr.Token { return s.leadingDot } func (s *IdentOrGlobalCallContext) GetId() antlr.Token { return s.id } func (s *IdentOrGlobalCallContext) GetOp() antlr.Token { return s.op } + func (s *IdentOrGlobalCallContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v } func (s *IdentOrGlobalCallContext) SetId(v antlr.Token) { s.id = v } func (s *IdentOrGlobalCallContext) SetOp(v antlr.Token) { s.op = v } + func (s *IdentOrGlobalCallContext) GetArgs() IExprListContext { return s.args } + func (s *IdentOrGlobalCallContext) SetArgs(v IExprListContext) { s.args = v } func (s *IdentOrGlobalCallContext) GetRuleContext() antlr.RuleContext { @@ -3143,10 +3484,10 @@ func (s *IdentOrGlobalCallContext) LPAREN() antlr.TerminalNode { } func (s *IdentOrGlobalCallContext) ExprList() IExprListContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprListContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -3158,6 +3499,7 @@ func (s *IdentOrGlobalCallContext) ExprList() IExprListContext { return t.(IExprListContext) } + func (s *IdentOrGlobalCallContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterIdentOrGlobalCall(s) @@ -3180,40 +3522,31 @@ func (s *IdentOrGlobalCallContext) Accept(visitor antlr.ParseTreeVisitor) interf } } -func (p *CELParser) Primary() (localctx IPrimaryContext) { - this := p - _ = this + +func (p *CELParser) Primary() (localctx IPrimaryContext) { localctx = NewPrimaryContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 16, CELParserRULE_primary) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.SetState(180) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 25, p.GetParserRuleContext()) { + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 25, p.GetParserRuleContext()) { case 1: localctx = NewIdentOrGlobalCallContext(p, localctx) 
p.EnterOuterAlt(localctx, 1) p.SetState(130) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserDOT { { p.SetState(129) @@ -3221,6 +3554,10 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserDOT) localctx.(*IdentOrGlobalCallContext).leadingDot = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -3230,28 +3567,42 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserIDENTIFIER) localctx.(*IdentOrGlobalCallContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(138) p.GetErrorHandler().Sync(p) - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 { + + if p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 { { p.SetState(133) var _m = p.Match(CELParserLPAREN) localctx.(*IdentOrGlobalCallContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(135) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) - if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135762105344) != 0 { + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) { { p.SetState(134) var _x = p.ExprList() + localctx.(*IdentOrGlobalCallContext).args = _x } @@ -3259,29 +3610,46 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { { p.SetState(137) p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + } else if p.HasError() { // JIM + goto errorExit } + case 2: localctx = NewNestedContext(p, localctx) p.EnterOuterAlt(localctx, 2) { p.SetState(140) p.Match(CELParserLPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(141) var _x = p.Expr() + localctx.(*NestedContext).e = _x } { p.SetState(142) p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 3: localctx = NewCreateListContext(p, localctx) p.EnterOuterAlt(localctx, 3) @@ -3291,37 +3659,59 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserLBRACKET) localctx.(*CreateListContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(146) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) - if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 { + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) { { p.SetState(145) var _x = p.ListInit() + localctx.(*CreateListContext).elems = _x } } p.SetState(149) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserCOMMA { { p.SetState(148) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } { p.SetState(151) p.Match(CELParserRPRACKET) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 4: localctx = NewCreateStructContext(p, localctx) p.EnterOuterAlt(localctx, 4) @@ -3331,44 +3721,70 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserLBRACE) localctx.(*CreateStructContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(154) 
p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) - if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 { + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) { { p.SetState(153) var _x = p.MapInitializerList() + localctx.(*CreateStructContext).entries = _x } } p.SetState(157) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserCOMMA { { p.SetState(156) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } { p.SetState(159) p.Match(CELParserRBRACE) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 5: localctx = NewCreateMessageContext(p, localctx) p.EnterOuterAlt(localctx, 5) p.SetState(161) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserDOT { { p.SetState(160) @@ -3376,6 +3792,10 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserDOT) localctx.(*CreateMessageContext).leadingDot = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -3385,12 +3805,20 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserIDENTIFIER) localctx.(*CreateMessageContext)._IDENTIFIER = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER) p.SetState(168) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + for _la == CELParserDOT { { p.SetState(164) @@ -3398,6 +3826,10 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserDOT) localctx.(*CreateMessageContext).s16 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*CreateMessageContext).ops = append(localctx.(*CreateMessageContext).ops, localctx.(*CreateMessageContext).s16) { @@ -3406,11 +3838,19 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserIDENTIFIER) localctx.(*CreateMessageContext)._IDENTIFIER = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER) + p.SetState(170) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) } { @@ -3419,37 +3859,59 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserLBRACE) localctx.(*CreateMessageContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(173) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK || _la == CELParserIDENTIFIER { { p.SetState(172) var _x = p.FieldInitializerList() + localctx.(*CreateMessageContext).entries = _x } } p.SetState(176) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserCOMMA { { p.SetState(175) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } { p.SetState(178) p.Match(CELParserRBRACE) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + 
case 6: localctx = NewConstantLiteralContext(p, localctx) p.EnterOuterAlt(localctx, 6) @@ -3458,11 +3920,25 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { p.Literal() } + case antlr.ATNInvalidAltNumber: + goto errorExit } + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IExprListContext is an interface to support dynamic dispatch. type IExprListContext interface { antlr.ParserRuleContext @@ -3473,14 +3949,18 @@ type IExprListContext interface { // Get_expr returns the _expr rule contexts. Get_expr() IExprContext + // Set_expr sets the _expr rule contexts. Set_expr(IExprContext) + // GetE returns the e rule context list. GetE() []IExprContext + // SetE sets the e rule context list. - SetE([]IExprContext) + SetE([]IExprContext) + // Getter signatures AllExpr() []IExprContext @@ -3493,25 +3973,30 @@ type IExprListContext interface { } type ExprListContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - _expr IExprContext - e []IExprContext + _expr IExprContext + e []IExprContext } func NewEmptyExprListContext() *ExprListContext { var p = new(ExprListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_exprList return p } +func InitEmptyExprListContext(p *ExprListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_exprList +} + func (*ExprListContext) IsExprListContext() {} func NewExprListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprListContext { var p = new(ExprListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_exprList @@ -3523,12 +4008,16 @@ func (s *ExprListContext) GetParser() antlr.Parser { return s.parser } func (s *ExprListContext) Get_expr() IExprContext { return s._expr } + func (s *ExprListContext) Set_expr(v IExprContext) { s._expr = v } + func (s *ExprListContext) GetE() []IExprContext { return s.e } + func (s *ExprListContext) SetE(v []IExprContext) { s.e = v } + func (s *ExprListContext) AllExpr() []IExprContext { children := s.GetChildren() len := 0 @@ -3551,12 +4040,12 @@ func (s *ExprListContext) AllExpr() []IExprContext { } func (s *ExprListContext) Expr(i int) IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -3586,6 +4075,7 @@ func (s *ExprListContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *ExprListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterExprList(s) @@ -3608,65 +4098,76 @@ func (s *ExprListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) ExprList() (localctx IExprListContext) { - this := p - _ = this + + +func (p *CELParser) ExprList() (localctx IExprListContext) { localctx = NewExprListContext(p, 
p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 18, CELParserRULE_exprList) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) { p.SetState(182) var _x = p.Expr() + localctx.(*ExprListContext)._expr = _x } localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) p.SetState(187) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + for _la == CELParserCOMMA { { p.SetState(183) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(184) var _x = p.Expr() + localctx.(*ExprListContext)._expr = _x } localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) + p.SetState(189) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IListInitContext is an interface to support dynamic dispatch. type IListInitContext interface { antlr.ParserRuleContext @@ -3677,14 +4178,18 @@ type IListInitContext interface { // Get_optExpr returns the _optExpr rule contexts. Get_optExpr() IOptExprContext + // Set_optExpr sets the _optExpr rule contexts. Set_optExpr(IOptExprContext) + // GetElems returns the elems rule context list. GetElems() []IOptExprContext + // SetElems sets the elems rule context list. 
- SetElems([]IOptExprContext) + SetElems([]IOptExprContext) + // Getter signatures AllOptExpr() []IOptExprContext @@ -3697,25 +4202,30 @@ type IListInitContext interface { } type ListInitContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - _optExpr IOptExprContext - elems []IOptExprContext + antlr.BaseParserRuleContext + parser antlr.Parser + _optExpr IOptExprContext + elems []IOptExprContext } func NewEmptyListInitContext() *ListInitContext { var p = new(ListInitContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_listInit return p } +func InitEmptyListInitContext(p *ListInitContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_listInit +} + func (*ListInitContext) IsListInitContext() {} func NewListInitContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ListInitContext { var p = new(ListInitContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_listInit @@ -3727,12 +4237,16 @@ func (s *ListInitContext) GetParser() antlr.Parser { return s.parser } func (s *ListInitContext) Get_optExpr() IOptExprContext { return s._optExpr } + func (s *ListInitContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } + func (s *ListInitContext) GetElems() []IOptExprContext { return s.elems } + func (s *ListInitContext) SetElems(v []IOptExprContext) { s.elems = v } + func (s *ListInitContext) AllOptExpr() []IOptExprContext { children := s.GetChildren() len := 0 @@ -3755,12 +4269,12 @@ func (s *ListInitContext) AllOptExpr() []IOptExprContext { } func (s *ListInitContext) OptExpr(i int) IOptExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptExprContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -3790,6 +4304,7 @@ func (s *ListInitContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *ListInitContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterListInit(s) @@ -3812,29 +4327,12 @@ func (s *ListInitContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) ListInit() (localctx IListInitContext) { - this := p - _ = this - localctx = NewListInitContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 20, CELParserRULE_listInit) - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() +func (p *CELParser) ListInit() (localctx IListInitContext) { + localctx = NewListInitContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 20, CELParserRULE_listInit) var _alt int p.EnterOuterAlt(localctx, 1) @@ -3843,37 +4341,68 @@ func (p *CELParser) ListInit() (localctx IListInitContext) { var _x = p.OptExpr() + localctx.(*ListInitContext)._optExpr = _x } localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) p.SetState(195) 
p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 27, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { p.SetState(191) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(192) var _x = p.OptExpr() + localctx.(*ListInitContext)._optExpr = _x } localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) + } p.SetState(197) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 27, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IFieldInitializerListContext is an interface to support dynamic dispatch. type IFieldInitializerListContext interface { antlr.ParserRuleContext @@ -3882,40 +4411,48 @@ type IFieldInitializerListContext interface { GetParser() antlr.Parser // GetS21 returns the s21 token. - GetS21() antlr.Token + GetS21() antlr.Token + // SetS21 sets the s21 token. - SetS21(antlr.Token) + SetS21(antlr.Token) + // GetCols returns the cols token list. GetCols() []antlr.Token + // SetCols sets the cols token list. SetCols([]antlr.Token) + // Get_optField returns the _optField rule contexts. Get_optField() IOptFieldContext // Get_expr returns the _expr rule contexts. Get_expr() IExprContext + // Set_optField sets the _optField rule contexts. Set_optField(IOptFieldContext) // Set_expr sets the _expr rule contexts. Set_expr(IExprContext) + // GetFields returns the fields rule context list. GetFields() []IOptFieldContext // GetValues returns the values rule context list. GetValues() []IExprContext + // SetFields sets the fields rule context list. - SetFields([]IOptFieldContext) + SetFields([]IOptFieldContext) // SetValues sets the values rule context list. 
- SetValues([]IExprContext) + SetValues([]IExprContext) + // Getter signatures AllOptField() []IOptFieldContext @@ -3932,29 +4469,34 @@ type IFieldInitializerListContext interface { } type FieldInitializerListContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - _optField IOptFieldContext - fields []IOptFieldContext - s21 antlr.Token - cols []antlr.Token - _expr IExprContext - values []IExprContext + antlr.BaseParserRuleContext + parser antlr.Parser + _optField IOptFieldContext + fields []IOptFieldContext + s21 antlr.Token + cols []antlr.Token + _expr IExprContext + values []IExprContext } func NewEmptyFieldInitializerListContext() *FieldInitializerListContext { var p = new(FieldInitializerListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_fieldInitializerList return p } +func InitEmptyFieldInitializerListContext(p *FieldInitializerListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_fieldInitializerList +} + func (*FieldInitializerListContext) IsFieldInitializerListContext() {} func NewFieldInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *FieldInitializerListContext { var p = new(FieldInitializerListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_fieldInitializerList @@ -3966,28 +4508,36 @@ func (s *FieldInitializerListContext) GetParser() antlr.Parser { return s.parser func (s *FieldInitializerListContext) GetS21() antlr.Token { return s.s21 } + func (s *FieldInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + func (s *FieldInitializerListContext) GetCols() []antlr.Token { return s.cols } + func (s *FieldInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } + func (s *FieldInitializerListContext) Get_optField() IOptFieldContext { return s._optField } func (s *FieldInitializerListContext) Get_expr() IExprContext { return s._expr } + func (s *FieldInitializerListContext) Set_optField(v IOptFieldContext) { s._optField = v } func (s *FieldInitializerListContext) Set_expr(v IExprContext) { s._expr = v } + func (s *FieldInitializerListContext) GetFields() []IOptFieldContext { return s.fields } func (s *FieldInitializerListContext) GetValues() []IExprContext { return s.values } + func (s *FieldInitializerListContext) SetFields(v []IOptFieldContext) { s.fields = v } func (s *FieldInitializerListContext) SetValues(v []IExprContext) { s.values = v } + func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext { children := s.GetChildren() len := 0 @@ -4010,12 +4560,12 @@ func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext { } func (s *FieldInitializerListContext) OptField(i int) IOptFieldContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptFieldContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -4059,12 +4609,12 @@ func (s *FieldInitializerListContext) AllExpr() []IExprContext { } func (s *FieldInitializerListContext) Expr(i int) IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { if j == i { - t = 
ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -4094,6 +4644,7 @@ func (s *FieldInitializerListContext) ToStringTree(ruleNames []string, recog ant return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *FieldInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterFieldInitializerList(s) @@ -4116,29 +4667,12 @@ func (s *FieldInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) int } } -func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContext) { - this := p - _ = this - localctx = NewFieldInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 22, CELParserRULE_fieldInitializerList) - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() +func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContext) { + localctx = NewFieldInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 22, CELParserRULE_fieldInitializerList) var _alt int p.EnterOuterAlt(localctx, 1) @@ -4147,6 +4681,7 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _x = p.OptField() + localctx.(*FieldInitializerListContext)._optField = _x } localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) @@ -4156,6 +4691,10 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _m = p.Match(CELParserCOLON) localctx.(*FieldInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21) { @@ -4163,24 +4702,35 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _x = p.Expr() + localctx.(*FieldInitializerListContext)._expr = _x } localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) p.SetState(208) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 28, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { p.SetState(201) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(202) var _x = p.OptField() + localctx.(*FieldInitializerListContext)._optField = _x } localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) @@ -4190,6 +4740,10 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _m = p.Match(CELParserCOLON) localctx.(*FieldInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, 
localctx.(*FieldInitializerListContext).s21) { @@ -4197,19 +4751,40 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _x = p.Expr() + localctx.(*FieldInitializerListContext)._expr = _x } localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) + } p.SetState(210) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 28, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IOptFieldContext is an interface to support dynamic dispatch. type IOptFieldContext interface { antlr.ParserRuleContext @@ -4218,10 +4793,12 @@ type IOptFieldContext interface { GetParser() antlr.Parser // GetOpt returns the opt token. - GetOpt() antlr.Token + GetOpt() antlr.Token + // SetOpt sets the opt token. - SetOpt(antlr.Token) + SetOpt(antlr.Token) + // Getter signatures IDENTIFIER() antlr.TerminalNode @@ -4232,24 +4809,29 @@ type IOptFieldContext interface { } type OptFieldContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - opt antlr.Token + opt antlr.Token } func NewEmptyOptFieldContext() *OptFieldContext { var p = new(OptFieldContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_optField return p } +func InitEmptyOptFieldContext(p *OptFieldContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optField +} + func (*OptFieldContext) IsOptFieldContext() {} func NewOptFieldContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptFieldContext { var p = new(OptFieldContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_optField @@ -4261,8 +4843,10 @@ func (s *OptFieldContext) GetParser() antlr.Parser { return s.parser } func (s *OptFieldContext) GetOpt() antlr.Token { return s.opt } + func (s *OptFieldContext) SetOpt(v antlr.Token) { s.opt = v } + func (s *OptFieldContext) IDENTIFIER() antlr.TerminalNode { return s.GetToken(CELParserIDENTIFIER, 0) } @@ -4279,6 +4863,7 @@ func (s *OptFieldContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *OptFieldContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterOptField(s) @@ -4301,35 +4886,23 @@ func (s *OptFieldContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) OptField() (localctx IOptFieldContext) { - this := p - _ = this + + +func (p *CELParser) OptField() (localctx IOptFieldContext) { localctx = NewOptFieldContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 24, CELParserRULE_optField) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - 
if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) p.SetState(212) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK { { p.SetState(211) @@ -4337,17 +4910,38 @@ func (p *CELParser) OptField() (localctx IOptFieldContext) { var _m = p.Match(CELParserQUESTIONMARK) localctx.(*OptFieldContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } { p.SetState(214) p.Match(CELParserIDENTIFIER) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IMapInitializerListContext is an interface to support dynamic dispatch. type IMapInitializerListContext interface { antlr.ParserRuleContext @@ -4356,40 +4950,48 @@ type IMapInitializerListContext interface { GetParser() antlr.Parser // GetS21 returns the s21 token. - GetS21() antlr.Token + GetS21() antlr.Token + // SetS21 sets the s21 token. - SetS21(antlr.Token) + SetS21(antlr.Token) + // GetCols returns the cols token list. GetCols() []antlr.Token + // SetCols sets the cols token list. SetCols([]antlr.Token) + // Get_optExpr returns the _optExpr rule contexts. Get_optExpr() IOptExprContext // Get_expr returns the _expr rule contexts. Get_expr() IExprContext + // Set_optExpr sets the _optExpr rule contexts. Set_optExpr(IOptExprContext) // Set_expr sets the _expr rule contexts. Set_expr(IExprContext) + // GetKeys returns the keys rule context list. GetKeys() []IOptExprContext // GetValues returns the values rule context list. GetValues() []IExprContext + // SetKeys sets the keys rule context list. - SetKeys([]IOptExprContext) + SetKeys([]IOptExprContext) // SetValues sets the values rule context list. 
- SetValues([]IExprContext) + SetValues([]IExprContext) + // Getter signatures AllOptExpr() []IOptExprContext @@ -4406,29 +5008,34 @@ type IMapInitializerListContext interface { } type MapInitializerListContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - _optExpr IOptExprContext - keys []IOptExprContext - s21 antlr.Token - cols []antlr.Token - _expr IExprContext - values []IExprContext + antlr.BaseParserRuleContext + parser antlr.Parser + _optExpr IOptExprContext + keys []IOptExprContext + s21 antlr.Token + cols []antlr.Token + _expr IExprContext + values []IExprContext } func NewEmptyMapInitializerListContext() *MapInitializerListContext { var p = new(MapInitializerListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_mapInitializerList return p } +func InitEmptyMapInitializerListContext(p *MapInitializerListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_mapInitializerList +} + func (*MapInitializerListContext) IsMapInitializerListContext() {} func NewMapInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MapInitializerListContext { var p = new(MapInitializerListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_mapInitializerList @@ -4440,28 +5047,36 @@ func (s *MapInitializerListContext) GetParser() antlr.Parser { return s.parser } func (s *MapInitializerListContext) GetS21() antlr.Token { return s.s21 } + func (s *MapInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + func (s *MapInitializerListContext) GetCols() []antlr.Token { return s.cols } + func (s *MapInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } + func (s *MapInitializerListContext) Get_optExpr() IOptExprContext { return s._optExpr } func (s *MapInitializerListContext) Get_expr() IExprContext { return s._expr } + func (s *MapInitializerListContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } func (s *MapInitializerListContext) Set_expr(v IExprContext) { s._expr = v } + func (s *MapInitializerListContext) GetKeys() []IOptExprContext { return s.keys } func (s *MapInitializerListContext) GetValues() []IExprContext { return s.values } + func (s *MapInitializerListContext) SetKeys(v []IOptExprContext) { s.keys = v } func (s *MapInitializerListContext) SetValues(v []IExprContext) { s.values = v } + func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext { children := s.GetChildren() len := 0 @@ -4484,12 +5099,12 @@ func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext { } func (s *MapInitializerListContext) OptExpr(i int) IOptExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptExprContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -4533,12 +5148,12 @@ func (s *MapInitializerListContext) AllExpr() []IExprContext { } func (s *MapInitializerListContext) Expr(i int) IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -4568,6 +5183,7 @@ func (s 
*MapInitializerListContext) ToStringTree(ruleNames []string, recog antlr return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *MapInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterMapInitializerList(s) @@ -4590,29 +5206,12 @@ func (s *MapInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) inter } } -func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { - this := p - _ = this - localctx = NewMapInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 26, CELParserRULE_mapInitializerList) - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() +func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { + localctx = NewMapInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 26, CELParserRULE_mapInitializerList) var _alt int p.EnterOuterAlt(localctx, 1) @@ -4621,6 +5220,7 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _x = p.OptExpr() + localctx.(*MapInitializerListContext)._optExpr = _x } localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr) @@ -4630,6 +5230,10 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _m = p.Match(CELParserCOLON) localctx.(*MapInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) { @@ -4637,24 +5241,35 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _x = p.Expr() + localctx.(*MapInitializerListContext)._expr = _x } localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) p.SetState(226) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 30, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { p.SetState(219) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(220) var _x = p.OptExpr() + localctx.(*MapInitializerListContext)._optExpr = _x } localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr) @@ -4664,6 +5279,10 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _m = p.Match(CELParserCOLON) localctx.(*MapInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) { @@ -4671,19 +5290,40 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _x = p.Expr() + 
localctx.(*MapInitializerListContext)._expr = _x } localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) + } p.SetState(228) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 30, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IOptExprContext is an interface to support dynamic dispatch. type IOptExprContext interface { antlr.ParserRuleContext @@ -4692,17 +5332,21 @@ type IOptExprContext interface { GetParser() antlr.Parser // GetOpt returns the opt token. - GetOpt() antlr.Token + GetOpt() antlr.Token + // SetOpt sets the opt token. - SetOpt(antlr.Token) + SetOpt(antlr.Token) + // GetE returns the e rule contexts. GetE() IExprContext + // SetE sets the e rule contexts. SetE(IExprContext) + // Getter signatures Expr() IExprContext QUESTIONMARK() antlr.TerminalNode @@ -4712,25 +5356,30 @@ type IOptExprContext interface { } type OptExprContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - opt antlr.Token - e IExprContext + opt antlr.Token + e IExprContext } func NewEmptyOptExprContext() *OptExprContext { var p = new(OptExprContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_optExpr return p } +func InitEmptyOptExprContext(p *OptExprContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optExpr +} + func (*OptExprContext) IsOptExprContext() {} func NewOptExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptExprContext { var p = new(OptExprContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_optExpr @@ -4742,17 +5391,21 @@ func (s *OptExprContext) GetParser() antlr.Parser { return s.parser } func (s *OptExprContext) GetOpt() antlr.Token { return s.opt } + func (s *OptExprContext) SetOpt(v antlr.Token) { s.opt = v } + func (s *OptExprContext) GetE() IExprContext { return s.e } + func (s *OptExprContext) SetE(v IExprContext) { s.e = v } + func (s *OptExprContext) Expr() IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -4776,6 +5429,7 @@ func (s *OptExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *OptExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterOptExpr(s) @@ -4798,35 +5452,23 @@ func (s *OptExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) OptExpr() (localctx IOptExprContext) { - this := p - _ = this + + +func (p *CELParser) 
OptExpr() (localctx IOptExprContext) { localctx = NewOptExprContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 28, CELParserRULE_optExpr) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) p.SetState(230) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK { { p.SetState(229) @@ -4834,6 +5476,10 @@ func (p *CELParser) OptExpr() (localctx IOptExprContext) { var _m = p.Match(CELParserQUESTIONMARK) localctx.(*OptExprContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -4842,12 +5488,26 @@ func (p *CELParser) OptExpr() (localctx IOptExprContext) { var _x = p.Expr() + localctx.(*OptExprContext).e = _x } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // ILiteralContext is an interface to support dynamic dispatch. type ILiteralContext interface { antlr.ParserRuleContext @@ -4859,23 +5519,28 @@ type ILiteralContext interface { } type LiteralContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser } func NewEmptyLiteralContext() *LiteralContext { var p = new(LiteralContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_literal return p } +func InitEmptyLiteralContext(p *LiteralContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_literal +} + func (*LiteralContext) IsLiteralContext() {} func NewLiteralContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *LiteralContext { var p = new(LiteralContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_literal @@ -4885,8 +5550,8 @@ func NewLiteralContext(parser antlr.Parser, parent antlr.ParserRuleContext, invo func (s *LiteralContext) GetParser() antlr.Parser { return s.parser } -func (s *LiteralContext) CopyFrom(ctx *LiteralContext) { - s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext) +func (s *LiteralContext) CopyAll(ctx *LiteralContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) } func (s *LiteralContext) GetRuleContext() antlr.RuleContext { @@ -4897,23 +5562,28 @@ func (s *LiteralContext) ToStringTree(ruleNames []string, recog antlr.Recognizer return antlr.TreesStringTree(s, ruleNames, recog) } + + + type BytesContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewBytesContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BytesContext { var p = new(BytesContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *BytesContext) GetTok() antlr.Token { return s.tok } + func (s *BytesContext) SetTok(v 
antlr.Token) { s.tok = v } func (s *BytesContext) GetRuleContext() antlr.RuleContext { @@ -4924,6 +5594,7 @@ func (s *BytesContext) BYTES() antlr.TerminalNode { return s.GetToken(CELParserBYTES, 0) } + func (s *BytesContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBytes(s) @@ -4946,23 +5617,26 @@ func (s *BytesContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type UintContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewUintContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UintContext { var p = new(UintContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *UintContext) GetTok() antlr.Token { return s.tok } + func (s *UintContext) SetTok(v antlr.Token) { s.tok = v } func (s *UintContext) GetRuleContext() antlr.RuleContext { @@ -4973,6 +5647,7 @@ func (s *UintContext) NUM_UINT() antlr.TerminalNode { return s.GetToken(CELParserNUM_UINT, 0) } + func (s *UintContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterUint(s) @@ -4995,23 +5670,26 @@ func (s *UintContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type NullContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewNullContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NullContext { var p = new(NullContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *NullContext) GetTok() antlr.Token { return s.tok } + func (s *NullContext) SetTok(v antlr.Token) { s.tok = v } func (s *NullContext) GetRuleContext() antlr.RuleContext { @@ -5022,6 +5700,7 @@ func (s *NullContext) NUL() antlr.TerminalNode { return s.GetToken(CELParserNUL, 0) } + func (s *NullContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterNull(s) @@ -5044,23 +5723,26 @@ func (s *NullContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type BoolFalseContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewBoolFalseContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolFalseContext { var p = new(BoolFalseContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *BoolFalseContext) GetTok() antlr.Token { return s.tok } + func (s *BoolFalseContext) SetTok(v antlr.Token) { s.tok = v } func (s *BoolFalseContext) GetRuleContext() antlr.RuleContext { @@ -5071,6 +5753,7 @@ func (s *BoolFalseContext) CEL_FALSE() antlr.TerminalNode { return s.GetToken(CELParserCEL_FALSE, 0) } + func (s *BoolFalseContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBoolFalse(s) @@ -5093,23 +5776,26 @@ func (s *BoolFalseContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type StringContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewStringContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *StringContext { var p = new(StringContext) - p.LiteralContext = NewEmptyLiteralContext() + 
InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *StringContext) GetTok() antlr.Token { return s.tok } + func (s *StringContext) SetTok(v antlr.Token) { s.tok = v } func (s *StringContext) GetRuleContext() antlr.RuleContext { @@ -5120,6 +5806,7 @@ func (s *StringContext) STRING() antlr.TerminalNode { return s.GetToken(CELParserSTRING, 0) } + func (s *StringContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterString(s) @@ -5142,26 +5829,29 @@ func (s *StringContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type DoubleContext struct { - *LiteralContext + LiteralContext sign antlr.Token - tok antlr.Token + tok antlr.Token } func NewDoubleContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *DoubleContext { var p = new(DoubleContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *DoubleContext) GetSign() antlr.Token { return s.sign } func (s *DoubleContext) GetTok() antlr.Token { return s.tok } + func (s *DoubleContext) SetSign(v antlr.Token) { s.sign = v } func (s *DoubleContext) SetTok(v antlr.Token) { s.tok = v } @@ -5178,6 +5868,7 @@ func (s *DoubleContext) MINUS() antlr.TerminalNode { return s.GetToken(CELParserMINUS, 0) } + func (s *DoubleContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterDouble(s) @@ -5200,23 +5891,26 @@ func (s *DoubleContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type BoolTrueContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewBoolTrueContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolTrueContext { var p = new(BoolTrueContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *BoolTrueContext) GetTok() antlr.Token { return s.tok } + func (s *BoolTrueContext) SetTok(v antlr.Token) { s.tok = v } func (s *BoolTrueContext) GetRuleContext() antlr.RuleContext { @@ -5227,6 +5921,7 @@ func (s *BoolTrueContext) CEL_TRUE() antlr.TerminalNode { return s.GetToken(CELParserCEL_TRUE, 0) } + func (s *BoolTrueContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBoolTrue(s) @@ -5249,26 +5944,29 @@ func (s *BoolTrueContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type IntContext struct { - *LiteralContext + LiteralContext sign antlr.Token - tok antlr.Token + tok antlr.Token } func NewIntContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IntContext { var p = new(IntContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *IntContext) GetSign() antlr.Token { return s.sign } func (s *IntContext) GetTok() antlr.Token { return s.tok } + func (s *IntContext) SetSign(v antlr.Token) { s.sign = v } func (s *IntContext) SetTok(v antlr.Token) { s.tok = v } @@ -5285,6 +5983,7 @@ func (s *IntContext) MINUS() antlr.TerminalNode { return s.GetToken(CELParserMINUS, 0) } + func (s *IntContext) EnterRule(listener antlr.ParseTreeListener) { if 
listenerT, ok := listener.(CELListener); ok { listenerT.EnterInt(s) @@ -5307,40 +6006,31 @@ func (s *IntContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) Literal() (localctx ILiteralContext) { - this := p - _ = this + +func (p *CELParser) Literal() (localctx ILiteralContext) { localctx = NewLiteralContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 30, CELParserRULE_literal) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.SetState(248) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 34, p.GetParserRuleContext()) { + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 34, p.GetParserRuleContext()) { case 1: localctx = NewIntContext(p, localctx) p.EnterOuterAlt(localctx, 1) p.SetState(235) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserMINUS { { p.SetState(234) @@ -5348,6 +6038,10 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserMINUS) localctx.(*IntContext).sign = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -5357,8 +6051,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserNUM_INT) localctx.(*IntContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 2: localctx = NewUintContext(p, localctx) p.EnterOuterAlt(localctx, 2) @@ -5368,15 +6067,24 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserNUM_UINT) localctx.(*UintContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 3: localctx = NewDoubleContext(p, localctx) p.EnterOuterAlt(localctx, 3) p.SetState(240) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserMINUS { { p.SetState(239) @@ -5384,6 +6092,10 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserMINUS) localctx.(*DoubleContext).sign = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -5393,8 +6105,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserNUM_FLOAT) localctx.(*DoubleContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 4: localctx = NewStringContext(p, localctx) p.EnterOuterAlt(localctx, 4) @@ -5404,8 +6121,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserSTRING) localctx.(*StringContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 5: localctx = NewBytesContext(p, localctx) p.EnterOuterAlt(localctx, 5) @@ -5415,8 +6137,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserBYTES) localctx.(*BytesContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 6: localctx = NewBoolTrueContext(p, localctx) p.EnterOuterAlt(localctx, 6) @@ -5426,8 +6153,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = 
p.Match(CELParserCEL_TRUE) localctx.(*BoolTrueContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 7: localctx = NewBoolFalseContext(p, localctx) p.EnterOuterAlt(localctx, 7) @@ -5437,8 +6169,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserCEL_FALSE) localctx.(*BoolFalseContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 8: localctx = NewNullContext(p, localctx) p.EnterOuterAlt(localctx, 8) @@ -5448,35 +6185,48 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserNUL) localctx.(*NullContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case antlr.ATNInvalidAltNumber: + goto errorExit } + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool { switch ruleIndex { case 4: - var t *RelationContext = nil - if localctx != nil { - t = localctx.(*RelationContext) - } - return p.Relation_Sempred(t, predIndex) + var t *RelationContext = nil + if localctx != nil { t = localctx.(*RelationContext) } + return p.Relation_Sempred(t, predIndex) case 5: - var t *CalcContext = nil - if localctx != nil { - t = localctx.(*CalcContext) - } - return p.Calc_Sempred(t, predIndex) + var t *CalcContext = nil + if localctx != nil { t = localctx.(*CalcContext) } + return p.Calc_Sempred(t, predIndex) case 7: - var t *MemberContext = nil - if localctx != nil { - t = localctx.(*MemberContext) - } - return p.Member_Sempred(t, predIndex) + var t *MemberContext = nil + if localctx != nil { t = localctx.(*MemberContext) } + return p.Member_Sempred(t, predIndex) + default: panic("No predicate with index: " + fmt.Sprint(ruleIndex)) @@ -5484,12 +6234,9 @@ func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int } func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) bool { - this := p - _ = this - switch predIndex { case 0: - return p.Precpred(p.GetParserRuleContext(), 1) + return p.Precpred(p.GetParserRuleContext(), 1) default: panic("No predicate with index: " + fmt.Sprint(predIndex)) @@ -5497,15 +6244,12 @@ func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) } func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool { - this := p - _ = this - switch predIndex { case 1: - return p.Precpred(p.GetParserRuleContext(), 2) + return p.Precpred(p.GetParserRuleContext(), 2) case 2: - return p.Precpred(p.GetParserRuleContext(), 1) + return p.Precpred(p.GetParserRuleContext(), 1) default: panic("No predicate with index: " + fmt.Sprint(predIndex)) @@ -5513,20 +6257,18 @@ func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool } func (p *CELParser) Member_Sempred(localctx antlr.RuleContext, predIndex int) bool { - this := p - _ = this - switch predIndex { case 3: - return p.Precpred(p.GetParserRuleContext(), 3) + return p.Precpred(p.GetParserRuleContext(), 3) case 4: - return p.Precpred(p.GetParserRuleContext(), 2) + return p.Precpred(p.GetParserRuleContext(), 2) case 5: - return p.Precpred(p.GetParserRuleContext(), 1) + return p.Precpred(p.GetParserRuleContext(), 1) default: 
panic("No predicate with index: " + fmt.Sprint(predIndex)) } } + diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go index 2c54e2cb02..d2fbd563ab 100644 --- a/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go +++ b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go @@ -1,7 +1,8 @@ -// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. package gen // CEL -import "github.com/antlr/antlr4/runtime/Go/antlr/v4" +import "github.com/antlr4-go/antlr/v4" + // A complete Visitor for a parse tree produced by CELParser. type CELVisitor interface { @@ -105,4 +106,5 @@ type CELVisitor interface { // Visit a parse tree produced by CELParser#Null. VisitNull(ctx *NullContext) interface{} -} + +} \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/parser/gen/generate.sh b/vendor/github.com/google/cel-go/parser/gen/generate.sh index 389107c6a5..27a9559f7c 100644 --- a/vendor/github.com/google/cel-go/parser/gen/generate.sh +++ b/vendor/github.com/google/cel-go/parser/gen/generate.sh @@ -27,7 +27,7 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # Generate AntLR artifacts. -java -Xmx500M -cp ${DIR}/antlr-4.12.0-complete.jar org.antlr.v4.Tool \ +java -Xmx500M -cp ${DIR}/antlr-4.13.1-complete.jar org.antlr.v4.Tool \ -Dlanguage=Go \ -package gen \ -o ${DIR} \ diff --git a/vendor/github.com/google/cel-go/parser/helper.go b/vendor/github.com/google/cel-go/parser/helper.go index a5f29e3d7a..96748358eb 100644 --- a/vendor/github.com/google/cel-go/parser/helper.go +++ b/vendor/github.com/google/cel-go/parser/helper.go @@ -17,284 +17,217 @@ package parser import ( "sync" - antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4" + antlr "github.com/antlr4-go/antlr/v4" "github.com/google/cel-go/common" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" ) type parserHelper struct { - source common.Source - nextID int64 - positions map[int64]int32 - macroCalls map[int64]*exprpb.Expr + exprFactory ast.ExprFactory + source common.Source + sourceInfo *ast.SourceInfo + nextID int64 } -func newParserHelper(source common.Source) *parserHelper { +func newParserHelper(source common.Source, fac ast.ExprFactory) *parserHelper { return &parserHelper{ - source: source, - nextID: 1, - positions: make(map[int64]int32), - macroCalls: make(map[int64]*exprpb.Expr), + exprFactory: fac, + source: source, + sourceInfo: ast.NewSourceInfo(source), + nextID: 1, } } -func (p *parserHelper) getSourceInfo() *exprpb.SourceInfo { - return &exprpb.SourceInfo{ - Location: p.source.Description(), - Positions: p.positions, - LineOffsets: p.source.LineOffsets(), - MacroCalls: p.macroCalls} +func (p *parserHelper) getSourceInfo() *ast.SourceInfo { + return p.sourceInfo } -func (p *parserHelper) newLiteral(ctx any, value *exprpb.Constant) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_ConstExpr{ConstExpr: value} - return exprNode +func (p *parserHelper) newLiteral(ctx any, value ref.Val) ast.Expr { + return p.exprFactory.NewLiteral(p.newID(ctx), value) } -func (p *parserHelper) newLiteralBool(ctx any, value bool) *exprpb.Expr { - return p.newLiteral(ctx, - 
&exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: value}}) +func (p *parserHelper) newLiteralBool(ctx any, value bool) ast.Expr { + return p.newLiteral(ctx, types.Bool(value)) } -func (p *parserHelper) newLiteralString(ctx any, value string) *exprpb.Expr { - return p.newLiteral(ctx, - &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: value}}) +func (p *parserHelper) newLiteralString(ctx any, value string) ast.Expr { + return p.newLiteral(ctx, types.String(value)) } -func (p *parserHelper) newLiteralBytes(ctx any, value []byte) *exprpb.Expr { - return p.newLiteral(ctx, - &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: value}}) +func (p *parserHelper) newLiteralBytes(ctx any, value []byte) ast.Expr { + return p.newLiteral(ctx, types.Bytes(value)) } -func (p *parserHelper) newLiteralInt(ctx any, value int64) *exprpb.Expr { - return p.newLiteral(ctx, - &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: value}}) +func (p *parserHelper) newLiteralInt(ctx any, value int64) ast.Expr { + return p.newLiteral(ctx, types.Int(value)) } -func (p *parserHelper) newLiteralUint(ctx any, value uint64) *exprpb.Expr { - return p.newLiteral(ctx, &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: value}}) +func (p *parserHelper) newLiteralUint(ctx any, value uint64) ast.Expr { + return p.newLiteral(ctx, types.Uint(value)) } -func (p *parserHelper) newLiteralDouble(ctx any, value float64) *exprpb.Expr { - return p.newLiteral(ctx, - &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: value}}) +func (p *parserHelper) newLiteralDouble(ctx any, value float64) ast.Expr { + return p.newLiteral(ctx, types.Double(value)) } -func (p *parserHelper) newIdent(ctx any, name string) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_IdentExpr{IdentExpr: &exprpb.Expr_Ident{Name: name}} - return exprNode +func (p *parserHelper) newIdent(ctx any, name string) ast.Expr { + return p.exprFactory.NewIdent(p.newID(ctx), name) } -func (p *parserHelper) newSelect(ctx any, operand *exprpb.Expr, field string) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_SelectExpr{ - SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field}} - return exprNode +func (p *parserHelper) newSelect(ctx any, operand ast.Expr, field string) ast.Expr { + return p.exprFactory.NewSelect(p.newID(ctx), operand, field) } -func (p *parserHelper) newPresenceTest(ctx any, operand *exprpb.Expr, field string) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_SelectExpr{ - SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field, TestOnly: true}} - return exprNode +func (p *parserHelper) newPresenceTest(ctx any, operand ast.Expr, field string) ast.Expr { + return p.exprFactory.NewPresenceTest(p.newID(ctx), operand, field) } -func (p *parserHelper) newGlobalCall(ctx any, function string, args ...*exprpb.Expr) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{Function: function, Args: args}} - return exprNode +func (p *parserHelper) newGlobalCall(ctx any, function string, args ...ast.Expr) ast.Expr { + return p.exprFactory.NewCall(p.newID(ctx), function, args...) 
} -func (p *parserHelper) newReceiverCall(ctx any, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{Function: function, Target: target, Args: args}} - return exprNode +func (p *parserHelper) newReceiverCall(ctx any, function string, target ast.Expr, args ...ast.Expr) ast.Expr { + return p.exprFactory.NewMemberCall(p.newID(ctx), function, target, args...) } -func (p *parserHelper) newList(ctx any, elements []*exprpb.Expr, optionals ...int32) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{ - Elements: elements, - OptionalIndices: optionals, - }} - return exprNode +func (p *parserHelper) newList(ctx any, elements []ast.Expr, optionals ...int32) ast.Expr { + return p.exprFactory.NewList(p.newID(ctx), elements, optionals) } -func (p *parserHelper) newMap(ctx any, entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{Entries: entries}} - return exprNode +func (p *parserHelper) newMap(ctx any, entries ...ast.EntryExpr) ast.Expr { + return p.exprFactory.NewMap(p.newID(ctx), entries) } -func (p *parserHelper) newMapEntry(entryID int64, key *exprpb.Expr, value *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { - return &exprpb.Expr_CreateStruct_Entry{ - Id: entryID, - KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{MapKey: key}, - Value: value, - OptionalEntry: optional, - } +func (p *parserHelper) newMapEntry(entryID int64, key ast.Expr, value ast.Expr, optional bool) ast.EntryExpr { + return p.exprFactory.NewMapEntry(entryID, key, value, optional) } -func (p *parserHelper) newObject(ctx any, typeName string, entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{ - MessageName: typeName, - Entries: entries, - }, - } - return exprNode +func (p *parserHelper) newObject(ctx any, typeName string, fields ...ast.EntryExpr) ast.Expr { + return p.exprFactory.NewStruct(p.newID(ctx), typeName, fields) } -func (p *parserHelper) newObjectField(fieldID int64, field string, value *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { - return &exprpb.Expr_CreateStruct_Entry{ - Id: fieldID, - KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{FieldKey: field}, - Value: value, - OptionalEntry: optional, - } +func (p *parserHelper) newObjectField(fieldID int64, field string, value ast.Expr, optional bool) ast.EntryExpr { + return p.exprFactory.NewStructField(fieldID, field, value, optional) } -func (p *parserHelper) newComprehension(ctx any, iterVar string, - iterRange *exprpb.Expr, +func (p *parserHelper) newComprehension(ctx any, + iterRange ast.Expr, + iterVar string, accuVar string, - accuInit *exprpb.Expr, - condition *exprpb.Expr, - step *exprpb.Expr, - result *exprpb.Expr) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_ComprehensionExpr{ - ComprehensionExpr: &exprpb.Expr_Comprehension{ - AccuVar: accuVar, - AccuInit: accuInit, - IterVar: iterVar, - IterRange: iterRange, - LoopCondition: condition, - LoopStep: step, - Result: result}} - return exprNode -} - -func (p *parserHelper) newExpr(ctx any) *exprpb.Expr { - id, isID := ctx.(int64) - if isID { - return &exprpb.Expr{Id: id} + accuInit ast.Expr, + condition 
ast.Expr, + step ast.Expr, + result ast.Expr) ast.Expr { + return p.exprFactory.NewComprehension( + p.newID(ctx), iterRange, iterVar, accuVar, accuInit, condition, step, result) +} + +func (p *parserHelper) newID(ctx any) int64 { + if id, isID := ctx.(int64); isID { + return id } - return &exprpb.Expr{Id: p.id(ctx)} + return p.id(ctx) +} + +func (p *parserHelper) newExpr(ctx any) ast.Expr { + return p.exprFactory.NewUnspecifiedExpr(p.newID(ctx)) } func (p *parserHelper) id(ctx any) int64 { - var location common.Location + var offset ast.OffsetRange switch c := ctx.(type) { case antlr.ParserRuleContext: - token := c.GetStart() - location = p.source.NewLocation(token.GetLine(), token.GetColumn()) + start := c.GetStart() + offset.Start = p.sourceInfo.ComputeOffset(int32(start.GetLine()), int32(start.GetColumn())) + offset.Stop = offset.Start + int32(len(c.GetText())) case antlr.Token: - token := c - location = p.source.NewLocation(token.GetLine(), token.GetColumn()) + offset.Start = p.sourceInfo.ComputeOffset(int32(c.GetLine()), int32(c.GetColumn())) + offset.Stop = offset.Start + int32(len(c.GetText())) case common.Location: - location = c + offset.Start = p.sourceInfo.ComputeOffset(int32(c.Line()), int32(c.Column())) + offset.Stop = offset.Start + case ast.OffsetRange: + offset = c default: // This should only happen if the ctx is nil return -1 } id := p.nextID - p.positions[id], _ = p.source.LocationOffset(location) + p.sourceInfo.SetOffsetRange(id, offset) p.nextID++ return id } +func (p *parserHelper) deleteID(id int64) { + p.sourceInfo.ClearOffsetRange(id) + if id == p.nextID-1 { + p.nextID-- + } +} + func (p *parserHelper) getLocation(id int64) common.Location { - characterOffset := p.positions[id] - location, _ := p.source.OffsetLocation(characterOffset) - return location + return p.sourceInfo.GetStartLocation(id) +} + +func (p *parserHelper) getLocationByOffset(offset int32) common.Location { + return p.getSourceInfo().GetLocationByOffset(offset) } // buildMacroCallArg iterates the expression and returns a new expression // where all macros have been replaced by their IDs in MacroCalls -func (p *parserHelper) buildMacroCallArg(expr *exprpb.Expr) *exprpb.Expr { - if _, found := p.macroCalls[expr.GetId()]; found { - return &exprpb.Expr{Id: expr.GetId()} +func (p *parserHelper) buildMacroCallArg(expr ast.Expr) ast.Expr { + if _, found := p.sourceInfo.GetMacroCall(expr.ID()); found { + return p.exprFactory.NewUnspecifiedExpr(expr.ID()) } - switch expr.GetExprKind().(type) { - case *exprpb.Expr_CallExpr: + switch expr.Kind() { + case ast.CallKind: // Iterate the AST from `expr` recursively looking for macros. Because we are at most // starting from the top level macro, this recursion is bounded by the size of the AST. This // means that the depth check on the AST during parsing will catch recursion overflows // before we get to here. 
- macroTarget := expr.GetCallExpr().GetTarget() - if macroTarget != nil { - macroTarget = p.buildMacroCallArg(macroTarget) - } - macroArgs := make([]*exprpb.Expr, len(expr.GetCallExpr().GetArgs())) - for index, arg := range expr.GetCallExpr().GetArgs() { + call := expr.AsCall() + macroArgs := make([]ast.Expr, len(call.Args())) + for index, arg := range call.Args() { macroArgs[index] = p.buildMacroCallArg(arg) } - return &exprpb.Expr{ - Id: expr.GetId(), - ExprKind: &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{ - Target: macroTarget, - Function: expr.GetCallExpr().GetFunction(), - Args: macroArgs, - }, - }, + if !call.IsMemberFunction() { + return p.exprFactory.NewCall(expr.ID(), call.FunctionName(), macroArgs...) } - case *exprpb.Expr_ListExpr: - listExpr := expr.GetListExpr() - macroListArgs := make([]*exprpb.Expr, len(listExpr.GetElements())) - for i, elem := range listExpr.GetElements() { + macroTarget := p.buildMacroCallArg(call.Target()) + return p.exprFactory.NewMemberCall(expr.ID(), call.FunctionName(), macroTarget, macroArgs...) + case ast.ListKind: + list := expr.AsList() + macroListArgs := make([]ast.Expr, list.Size()) + for i, elem := range list.Elements() { macroListArgs[i] = p.buildMacroCallArg(elem) } - return &exprpb.Expr{ - Id: expr.GetId(), - ExprKind: &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{ - Elements: macroListArgs, - OptionalIndices: listExpr.GetOptionalIndices(), - }, - }, - } + return p.exprFactory.NewList(expr.ID(), macroListArgs, list.OptionalIndices()) } - return expr } // addMacroCall adds the macro the the MacroCalls map in source info. If a macro has args/subargs/target // that are macros, their ID will be stored instead for later self-lookups. -func (p *parserHelper) addMacroCall(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) { - macroTarget := target - if target != nil { - if _, found := p.macroCalls[target.GetId()]; found { - macroTarget = &exprpb.Expr{Id: target.GetId()} - } else { - macroTarget = p.buildMacroCallArg(target) - } - } - - macroArgs := make([]*exprpb.Expr, len(args)) +func (p *parserHelper) addMacroCall(exprID int64, function string, target ast.Expr, args ...ast.Expr) { + macroArgs := make([]ast.Expr, len(args)) for index, arg := range args { macroArgs[index] = p.buildMacroCallArg(arg) } - - p.macroCalls[exprID] = &exprpb.Expr{ - ExprKind: &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{ - Target: macroTarget, - Function: function, - Args: macroArgs, - }, - }, + if target == nil { + p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewCall(0, function, macroArgs...)) + return + } + macroTarget := target + if _, found := p.sourceInfo.GetMacroCall(target.ID()); found { + macroTarget = p.exprFactory.NewUnspecifiedExpr(target.ID()) + } else { + macroTarget = p.buildMacroCallArg(target) } + p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewMemberCall(0, function, macroTarget, macroArgs...)) } // logicManager compacts logical trees into a more efficient structure which is semantically @@ -309,71 +242,71 @@ func (p *parserHelper) addMacroCall(exprID int64, function string, target *exprp // controversial choice as it alters the traditional order of execution assumptions present in most // expressions. type logicManager struct { - helper *parserHelper + exprFactory ast.ExprFactory function string - terms []*exprpb.Expr + terms []ast.Expr ops []int64 variadicASTs bool } // newVariadicLogicManager creates a logic manager instance bound to a specific function and its first term. 
-func newVariadicLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager { +func newVariadicLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager { return &logicManager{ - helper: h, + exprFactory: fac, function: function, - terms: []*exprpb.Expr{term}, + terms: []ast.Expr{term}, ops: []int64{}, variadicASTs: true, } } // newBalancingLogicManager creates a logic manager instance bound to a specific function and its first term. -func newBalancingLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager { +func newBalancingLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager { return &logicManager{ - helper: h, + exprFactory: fac, function: function, - terms: []*exprpb.Expr{term}, + terms: []ast.Expr{term}, ops: []int64{}, variadicASTs: false, } } // addTerm adds an operation identifier and term to the set of terms to be balanced. -func (l *logicManager) addTerm(op int64, term *exprpb.Expr) { +func (l *logicManager) addTerm(op int64, term ast.Expr) { l.terms = append(l.terms, term) l.ops = append(l.ops, op) } // toExpr renders the logic graph into an Expr value, either balancing a tree of logical // operations or creating a variadic representation of the logical operator. -func (l *logicManager) toExpr() *exprpb.Expr { +func (l *logicManager) toExpr() ast.Expr { if len(l.terms) == 1 { return l.terms[0] } if l.variadicASTs { - return l.helper.newGlobalCall(l.ops[0], l.function, l.terms...) + return l.exprFactory.NewCall(l.ops[0], l.function, l.terms...) } return l.balancedTree(0, len(l.ops)-1) } // balancedTree recursively balances the terms provided to a commutative operator. -func (l *logicManager) balancedTree(lo, hi int) *exprpb.Expr { +func (l *logicManager) balancedTree(lo, hi int) ast.Expr { mid := (lo + hi + 1) / 2 - var left *exprpb.Expr + var left ast.Expr if mid == lo { left = l.terms[mid] } else { left = l.balancedTree(lo, mid-1) } - var right *exprpb.Expr + var right ast.Expr if mid == hi { right = l.terms[mid+1] } else { right = l.balancedTree(mid+1, hi) } - return l.helper.newGlobalCall(l.ops[mid], l.function, left, right) + return l.exprFactory.NewCall(l.ops[mid], l.function, left, right) } type exprHelper struct { @@ -387,202 +320,151 @@ func (e *exprHelper) nextMacroID() int64 { // Copy implements the ExprHelper interface method by producing a copy of the input Expr value // with a fresh set of numeric identifiers the Expr and all its descendants. 
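
Note: the logicManager shown above is the piece that decides whether chained `&&`/`||` terms become one flat variadic call or a balanced binary tree. A standalone sketch of the balanced shape, mirroring the midpoint recursion in balancedTree on plain strings (illustrative code, not part of cel-go):

package main

import "fmt"

// balanced mirrors logicManager.balancedTree: terms has one more element than
// there are operators, and the recursion splits the operator index range at its midpoint.
func balanced(terms []string, lo, hi int) string {
	mid := (lo + hi + 1) / 2
	var left, right string
	if mid == lo {
		left = terms[mid]
	} else {
		left = balanced(terms, lo, mid-1)
	}
	if mid == hi {
		right = terms[mid+1]
	} else {
		right = balanced(terms, mid+1, hi)
	}
	return "(" + left + " && " + right + ")"
}

func main() {
	terms := []string{"a", "b", "c", "d"}
	fmt.Println(balanced(terms, 0, len(terms)-2)) // ((a && b) && (c && d))
	// With enableVariadicOperatorASTs the manager instead emits a single flat
	// call, _&&_(a, b, c, d), which toExpr builds via one NewCall.
}
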
-func (e *exprHelper) Copy(expr *exprpb.Expr) *exprpb.Expr { - copy := e.parserHelper.newExpr(e.parserHelper.getLocation(expr.GetId())) - switch expr.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - copy.ExprKind = &exprpb.Expr_ConstExpr{ConstExpr: expr.GetConstExpr()} - case *exprpb.Expr_IdentExpr: - copy.ExprKind = &exprpb.Expr_IdentExpr{IdentExpr: expr.GetIdentExpr()} - case *exprpb.Expr_SelectExpr: - op := expr.GetSelectExpr().GetOperand() - copy.ExprKind = &exprpb.Expr_SelectExpr{SelectExpr: &exprpb.Expr_Select{ - Operand: e.Copy(op), - Field: expr.GetSelectExpr().GetField(), - TestOnly: expr.GetSelectExpr().GetTestOnly(), - }} - case *exprpb.Expr_CallExpr: - call := expr.GetCallExpr() - target := call.GetTarget() - if target != nil { - target = e.Copy(target) +func (e *exprHelper) Copy(expr ast.Expr) ast.Expr { + offsetRange, _ := e.parserHelper.sourceInfo.GetOffsetRange(expr.ID()) + copyID := e.parserHelper.newID(offsetRange) + switch expr.Kind() { + case ast.LiteralKind: + return e.exprFactory.NewLiteral(copyID, expr.AsLiteral()) + case ast.IdentKind: + return e.exprFactory.NewIdent(copyID, expr.AsIdent()) + case ast.SelectKind: + sel := expr.AsSelect() + op := e.Copy(sel.Operand()) + if sel.IsTestOnly() { + return e.exprFactory.NewPresenceTest(copyID, op, sel.FieldName()) } - args := call.GetArgs() - argsCopy := make([]*exprpb.Expr, len(args)) + return e.exprFactory.NewSelect(copyID, op, sel.FieldName()) + case ast.CallKind: + call := expr.AsCall() + args := call.Args() + argsCopy := make([]ast.Expr, len(args)) for i, arg := range args { argsCopy[i] = e.Copy(arg) } - copy.ExprKind = &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{ - Function: call.GetFunction(), - Target: target, - Args: argsCopy, - }, + if !call.IsMemberFunction() { + return e.exprFactory.NewCall(copyID, call.FunctionName(), argsCopy...) } - case *exprpb.Expr_ListExpr: - elems := expr.GetListExpr().GetElements() - elemsCopy := make([]*exprpb.Expr, len(elems)) + return e.exprFactory.NewMemberCall(copyID, call.FunctionName(), e.Copy(call.Target()), argsCopy...) 
+ case ast.ListKind: + list := expr.AsList() + elems := list.Elements() + elemsCopy := make([]ast.Expr, len(elems)) for i, elem := range elems { elemsCopy[i] = e.Copy(elem) } - copy.ExprKind = &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{Elements: elemsCopy}, - } - case *exprpb.Expr_StructExpr: - entries := expr.GetStructExpr().GetEntries() - entriesCopy := make([]*exprpb.Expr_CreateStruct_Entry, len(entries)) - for i, entry := range entries { - entryCopy := &exprpb.Expr_CreateStruct_Entry{} - entryCopy.Id = e.nextMacroID() - switch entry.GetKeyKind().(type) { - case *exprpb.Expr_CreateStruct_Entry_FieldKey: - entryCopy.KeyKind = &exprpb.Expr_CreateStruct_Entry_FieldKey{ - FieldKey: entry.GetFieldKey(), - } - case *exprpb.Expr_CreateStruct_Entry_MapKey: - entryCopy.KeyKind = &exprpb.Expr_CreateStruct_Entry_MapKey{ - MapKey: e.Copy(entry.GetMapKey()), - } - } - entryCopy.Value = e.Copy(entry.GetValue()) - entriesCopy[i] = entryCopy - } - copy.ExprKind = &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{ - MessageName: expr.GetStructExpr().GetMessageName(), - Entries: entriesCopy, - }, + return e.exprFactory.NewList(copyID, elemsCopy, list.OptionalIndices()) + case ast.MapKind: + m := expr.AsMap() + entries := m.Entries() + entriesCopy := make([]ast.EntryExpr, len(entries)) + for i, en := range entries { + entry := en.AsMapEntry() + entryID := e.nextMacroID() + entriesCopy[i] = e.exprFactory.NewMapEntry(entryID, + e.Copy(entry.Key()), e.Copy(entry.Value()), entry.IsOptional()) } - case *exprpb.Expr_ComprehensionExpr: - iterRange := e.Copy(expr.GetComprehensionExpr().GetIterRange()) - accuInit := e.Copy(expr.GetComprehensionExpr().GetAccuInit()) - cond := e.Copy(expr.GetComprehensionExpr().GetLoopCondition()) - step := e.Copy(expr.GetComprehensionExpr().GetLoopStep()) - result := e.Copy(expr.GetComprehensionExpr().GetResult()) - copy.ExprKind = &exprpb.Expr_ComprehensionExpr{ - ComprehensionExpr: &exprpb.Expr_Comprehension{ - IterRange: iterRange, - IterVar: expr.GetComprehensionExpr().GetIterVar(), - AccuInit: accuInit, - AccuVar: expr.GetComprehensionExpr().GetAccuVar(), - LoopCondition: cond, - LoopStep: step, - Result: result, - }, + return e.exprFactory.NewMap(copyID, entriesCopy) + case ast.StructKind: + s := expr.AsStruct() + fields := s.Fields() + fieldsCopy := make([]ast.EntryExpr, len(fields)) + for i, f := range fields { + field := f.AsStructField() + fieldID := e.nextMacroID() + fieldsCopy[i] = e.exprFactory.NewStructField(fieldID, + field.Name(), e.Copy(field.Value()), field.IsOptional()) } + return e.exprFactory.NewStruct(copyID, s.TypeName(), fieldsCopy) + case ast.ComprehensionKind: + compre := expr.AsComprehension() + iterRange := e.Copy(compre.IterRange()) + accuInit := e.Copy(compre.AccuInit()) + cond := e.Copy(compre.LoopCondition()) + step := e.Copy(compre.LoopStep()) + result := e.Copy(compre.Result()) + return e.exprFactory.NewComprehension(copyID, + iterRange, compre.IterVar(), compre.AccuVar(), accuInit, cond, step, result) } - return copy -} - -// LiteralBool implements the ExprHelper interface method. -func (e *exprHelper) LiteralBool(value bool) *exprpb.Expr { - return e.parserHelper.newLiteralBool(e.nextMacroID(), value) + return e.exprFactory.NewUnspecifiedExpr(copyID) } -// LiteralBytes implements the ExprHelper interface method. -func (e *exprHelper) LiteralBytes(value []byte) *exprpb.Expr { - return e.parserHelper.newLiteralBytes(e.nextMacroID(), value) -} - -// LiteralDouble implements the ExprHelper interface method. 
-func (e *exprHelper) LiteralDouble(value float64) *exprpb.Expr { - return e.parserHelper.newLiteralDouble(e.nextMacroID(), value) -} - -// LiteralInt implements the ExprHelper interface method. -func (e *exprHelper) LiteralInt(value int64) *exprpb.Expr { - return e.parserHelper.newLiteralInt(e.nextMacroID(), value) -} - -// LiteralString implements the ExprHelper interface method. -func (e *exprHelper) LiteralString(value string) *exprpb.Expr { - return e.parserHelper.newLiteralString(e.nextMacroID(), value) -} - -// LiteralUint implements the ExprHelper interface method. -func (e *exprHelper) LiteralUint(value uint64) *exprpb.Expr { - return e.parserHelper.newLiteralUint(e.nextMacroID(), value) +// NewLiteral implements the ExprHelper interface method. +func (e *exprHelper) NewLiteral(value ref.Val) ast.Expr { + return e.exprFactory.NewLiteral(e.nextMacroID(), value) } // NewList implements the ExprHelper interface method. -func (e *exprHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr { - return e.parserHelper.newList(e.nextMacroID(), elems) +func (e *exprHelper) NewList(elems ...ast.Expr) ast.Expr { + return e.exprFactory.NewList(e.nextMacroID(), elems, []int32{}) } // NewMap implements the ExprHelper interface method. -func (e *exprHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { - return e.parserHelper.newMap(e.nextMacroID(), entries...) +func (e *exprHelper) NewMap(entries ...ast.EntryExpr) ast.Expr { + return e.exprFactory.NewMap(e.nextMacroID(), entries) } // NewMapEntry implements the ExprHelper interface method. -func (e *exprHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { - return e.parserHelper.newMapEntry(e.nextMacroID(), key, val, optional) +func (e *exprHelper) NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr { + return e.exprFactory.NewMapEntry(e.nextMacroID(), key, val, optional) } -// NewObject implements the ExprHelper interface method. -func (e *exprHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { - return e.parserHelper.newObject(e.nextMacroID(), typeName, fieldInits...) +// NewStruct implements the ExprHelper interface method. +func (e *exprHelper) NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr { + return e.exprFactory.NewStruct(e.nextMacroID(), typeName, fieldInits) } -// NewObjectFieldInit implements the ExprHelper interface method. -func (e *exprHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { - return e.parserHelper.newObjectField(e.nextMacroID(), field, init, optional) +// NewStructField implements the ExprHelper interface method. +func (e *exprHelper) NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr { + return e.exprFactory.NewStructField(e.nextMacroID(), field, init, optional) } -// Fold implements the ExprHelper interface method. -func (e *exprHelper) Fold(iterVar string, - iterRange *exprpb.Expr, +// NewComprehension implements the ExprHelper interface method. 
+func (e *exprHelper) NewComprehension( + iterRange ast.Expr, + iterVar string, accuVar string, - accuInit *exprpb.Expr, - condition *exprpb.Expr, - step *exprpb.Expr, - result *exprpb.Expr) *exprpb.Expr { - return e.parserHelper.newComprehension( - e.nextMacroID(), iterVar, iterRange, accuVar, accuInit, condition, step, result) + accuInit ast.Expr, + condition ast.Expr, + step ast.Expr, + result ast.Expr) ast.Expr { + return e.exprFactory.NewComprehension( + e.nextMacroID(), iterRange, iterVar, accuVar, accuInit, condition, step, result) } -// Ident implements the ExprHelper interface method. -func (e *exprHelper) Ident(name string) *exprpb.Expr { - return e.parserHelper.newIdent(e.nextMacroID(), name) +// NewIdent implements the ExprHelper interface method. +func (e *exprHelper) NewIdent(name string) ast.Expr { + return e.exprFactory.NewIdent(e.nextMacroID(), name) } -// AccuIdent implements the ExprHelper interface method. -func (e *exprHelper) AccuIdent() *exprpb.Expr { - return e.parserHelper.newIdent(e.nextMacroID(), AccumulatorName) +// NewAccuIdent implements the ExprHelper interface method. +func (e *exprHelper) NewAccuIdent() ast.Expr { + return e.exprFactory.NewAccuIdent(e.nextMacroID()) } -// GlobalCall implements the ExprHelper interface method. -func (e *exprHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr { - return e.parserHelper.newGlobalCall(e.nextMacroID(), function, args...) +// NewGlobalCall implements the ExprHelper interface method. +func (e *exprHelper) NewCall(function string, args ...ast.Expr) ast.Expr { + return e.exprFactory.NewCall(e.nextMacroID(), function, args...) } -// ReceiverCall implements the ExprHelper interface method. -func (e *exprHelper) ReceiverCall(function string, - target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr { - return e.parserHelper.newReceiverCall(e.nextMacroID(), function, target, args...) +// NewMemberCall implements the ExprHelper interface method. +func (e *exprHelper) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr { + return e.exprFactory.NewMemberCall(e.nextMacroID(), function, target, args...) } -// PresenceTest implements the ExprHelper interface method. -func (e *exprHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr { - return e.parserHelper.newPresenceTest(e.nextMacroID(), operand, field) +// NewPresenceTest implements the ExprHelper interface method. +func (e *exprHelper) NewPresenceTest(operand ast.Expr, field string) ast.Expr { + return e.exprFactory.NewPresenceTest(e.nextMacroID(), operand, field) } -// Select implements the ExprHelper interface method. -func (e *exprHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr { - return e.parserHelper.newSelect(e.nextMacroID(), operand, field) +// NewSelect implements the ExprHelper interface method. +func (e *exprHelper) NewSelect(operand ast.Expr, field string) ast.Expr { + return e.exprFactory.NewSelect(e.nextMacroID(), operand, field) } // OffsetLocation implements the ExprHelper interface method. func (e *exprHelper) OffsetLocation(exprID int64) common.Location { - offset, found := e.parserHelper.positions[exprID] - if !found { - return common.NoLocation - } - location, found := e.parserHelper.source.OffsetLocation(offset) - if !found { - return common.NoLocation - } - return location + return e.parserHelper.sourceInfo.GetStartLocation(exprID) } // NewError associates an error message with a given expression id, populating the source offset location of the error if possible. 
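
Note: with the helper now delegating to the common/ast factory, expression construction no longer goes through the v1alpha1 protos. A minimal sketch, using only factory calls that appear in this diff, of building `x + 1` by hand (the literal ids 1-3 are supplied here purely for illustration):

package main

import (
	"fmt"

	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/operators"
	"github.com/google/cel-go/common/types"
)

func main() {
	// Build `x + 1` with the proto-free factory the parser helper now wraps.
	fac := ast.NewExprFactory()
	sum := fac.NewCall(3, operators.Add,
		fac.NewIdent(1, "x"),
		fac.NewLiteral(2, types.Int(1)),
	)
	// Each node carries the id it was created with, mirroring how the helper
	// assigns ids and records offset ranges in SourceInfo.
	fmt.Println(sum.ID(), sum.AsCall().FunctionName(), len(sum.AsCall().Args()))
}

In the parser itself the ids come from parserHelper.newID, which also records the node's offset range in SourceInfo instead of the old positions map.
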
diff --git a/vendor/github.com/google/cel-go/parser/input.go b/vendor/github.com/google/cel-go/parser/input.go index 810eaff218..44792455d8 100644 --- a/vendor/github.com/google/cel-go/parser/input.go +++ b/vendor/github.com/google/cel-go/parser/input.go @@ -15,7 +15,7 @@ package parser import ( - antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4" + antlr "github.com/antlr4-go/antlr/v4" "github.com/google/cel-go/common/runes" ) @@ -110,7 +110,7 @@ func (c *charStream) GetTextFromTokens(start, stop antlr.Token) string { } // GetTextFromInterval implements (antlr.CharStream).GetTextFromInterval. -func (c *charStream) GetTextFromInterval(i *antlr.Interval) string { +func (c *charStream) GetTextFromInterval(i antlr.Interval) string { return c.GetText(i.Start, i.Stop) } diff --git a/vendor/github.com/google/cel-go/parser/macro.go b/vendor/github.com/google/cel-go/parser/macro.go index 6066e8ef4f..c1936b69fe 100644 --- a/vendor/github.com/google/cel-go/parser/macro.go +++ b/vendor/github.com/google/cel-go/parser/macro.go @@ -18,9 +18,10 @@ import ( "fmt" "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/operators" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" ) // NewGlobalMacro creates a Macro for a global function with the specified arg count. @@ -142,58 +143,38 @@ func makeVarArgMacroKey(name string, receiverStyle bool) string { // and produces as output an Expr ast node. // // Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil. -type MacroExpander func(eh ExprHelper, - target *exprpb.Expr, - args []*exprpb.Expr) (*exprpb.Expr, *common.Error) +type MacroExpander func(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) -// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is -// consistent with the source position and expression id generation code leveraged by both -// the parser and type-checker. +// ExprHelper assists with the creation of Expr values in a manner which is consistent +// the internal semantics and id generation behaviors of the parser and checker libraries. type ExprHelper interface { // Copy the input expression with a brand new set of identifiers. - Copy(*exprpb.Expr) *exprpb.Expr - - // LiteralBool creates an Expr value for a bool literal. - LiteralBool(value bool) *exprpb.Expr - - // LiteralBytes creates an Expr value for a byte literal. - LiteralBytes(value []byte) *exprpb.Expr - - // LiteralDouble creates an Expr value for double literal. - LiteralDouble(value float64) *exprpb.Expr + Copy(ast.Expr) ast.Expr - // LiteralInt creates an Expr value for an int literal. - LiteralInt(value int64) *exprpb.Expr + // Literal creates an Expr value for a scalar literal value. + NewLiteral(value ref.Val) ast.Expr - // LiteralString creates am Expr value for a string literal. - LiteralString(value string) *exprpb.Expr - - // LiteralUint creates an Expr value for a uint literal. - LiteralUint(value uint64) *exprpb.Expr - - // NewList creates a CreateList instruction where the list is comprised of the optional set - // of elements provided as arguments. - NewList(elems ...*exprpb.Expr) *exprpb.Expr + // NewList creates a list literal instruction with an optional set of elements. 
+ NewList(elems ...ast.Expr) ast.Expr // NewMap creates a CreateStruct instruction for a map where the map is comprised of the // optional set of key, value entries. - NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr + NewMap(entries ...ast.EntryExpr) ast.Expr // NewMapEntry creates a Map Entry for the key, value pair. - NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry + NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr - // NewObject creates a CreateStruct instruction for an object with a given type name and - // optional set of field initializers. - NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr + // NewStruct creates a struct literal expression with an optional set of field initializers. + NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr - // NewObjectFieldInit creates a new Object field initializer from the field name and value. - NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry + // NewStructField creates a new struct field initializer from the field name and value. + NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr - // Fold creates a fold comprehension instruction. + // NewComprehension creates a new comprehension instruction. // - // - iterVar is the iteration variable name. // - iterRange represents the expression that resolves to a list or map where the elements or // keys (respectively) will be iterated over. + // - iterVar is the iteration variable name. // - accuVar is the accumulation variable name, typically parser.AccumulatorName. // - accuInit is the initial expression whose value will be set for the accuVar prior to // folding. @@ -204,31 +185,31 @@ type ExprHelper interface { // The accuVar should not shadow variable names that you would like to reference within the // environment in the step and condition expressions. Presently, the name __result__ is commonly // used by built-in macros but this may change in the future. - Fold(iterVar string, - iterRange *exprpb.Expr, + NewComprehension(iterRange ast.Expr, + iterVar string, accuVar string, - accuInit *exprpb.Expr, - condition *exprpb.Expr, - step *exprpb.Expr, - result *exprpb.Expr) *exprpb.Expr + accuInit ast.Expr, + condition ast.Expr, + step ast.Expr, + result ast.Expr) ast.Expr - // Ident creates an identifier Expr value. - Ident(name string) *exprpb.Expr + // NewIdent creates an identifier Expr value. + NewIdent(name string) ast.Expr - // AccuIdent returns an accumulator identifier for use with comprehension results. - AccuIdent() *exprpb.Expr + // NewAccuIdent returns an accumulator identifier for use with comprehension results. + NewAccuIdent() ast.Expr - // GlobalCall creates a function call Expr value for a global (free) function. - GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr + // NewCall creates a function call Expr value for a global (free) function. + NewCall(function string, args ...ast.Expr) ast.Expr - // ReceiverCall creates a function call Expr value for a receiver-style function. - ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr + // NewMemberCall creates a function call Expr value for a receiver-style function. + NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr - // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics. 
- PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr + // NewPresenceTest creates a Select TestOnly Expr value for modelling has() semantics. + NewPresenceTest(operand ast.Expr, field string) ast.Expr - // Select create a field traversal Expr value. - Select(operand *exprpb.Expr, field string) *exprpb.Expr + // NewSelect create a field traversal Expr value. + NewSelect(operand ast.Expr, field string) ast.Expr // OffsetLocation returns the Location of the expression identifier. OffsetLocation(exprID int64) common.Location @@ -296,21 +277,21 @@ const ( // MakeAll expands the input call arguments into a comprehension that returns true if all of the // elements in the range match the predicate expressions: // .all(, ) -func MakeAll(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func MakeAll(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { return makeQuantifier(quantifierAll, eh, target, args) } // MakeExists expands the input call arguments into a comprehension that returns true if any of the // elements in the range match the predicate expressions: // .exists(, ) -func MakeExists(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func MakeExists(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { return makeQuantifier(quantifierExists, eh, target, args) } // MakeExistsOne expands the input call arguments into a comprehension that returns true if exactly // one of the elements in the range match the predicate expressions: // .exists_one(, ) -func MakeExistsOne(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func MakeExistsOne(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { return makeQuantifier(quantifierExistsOne, eh, target, args) } @@ -324,14 +305,14 @@ func MakeExistsOne(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*ex // // In the second form only iterVar values which return true when provided to the predicate expression // are transformed. 
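
Note: for code outside the vendored tree that defines its own macros, the MacroExpander and ExprHelper changes above are the breaking part of this bump. A hypothetical `twice(x)` macro written against the new signature might look roughly like this (TwiceMacro and the package name are illustrative, not library API):

package mymacros

import (
	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/operators"
	"github.com/google/cel-go/parser"
)

// TwiceMacro is a hypothetical global macro rewriting twice(x) into x + x,
// using the ast-based MacroExpander signature introduced in this diff.
var TwiceMacro = parser.NewGlobalMacro("twice", 1,
	func(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
		// Copy re-numbers the duplicated subtree so expression ids stay unique.
		return eh.NewCall(operators.Add, args[0], eh.Copy(args[0])), nil
	})

Such a macro would be registered the same way as before, e.g. through the parser Macros option (or the corresponding option at the top-level cel API).
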
-func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func MakeMap(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - return nil, eh.NewError(args[0].GetId(), "argument is not an identifier") + return nil, eh.NewError(args[0].ID(), "argument is not an identifier") } - var fn *exprpb.Expr - var filter *exprpb.Expr + var fn ast.Expr + var filter ast.Expr if len(args) == 3 { filter = args[1] @@ -341,84 +322,81 @@ func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.E fn = args[1] } - accuExpr := eh.Ident(AccumulatorName) init := eh.NewList() - condition := eh.LiteralBool(true) - step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(fn)) + condition := eh.NewLiteral(types.True) + step := eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewList(fn)) if filter != nil { - step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr) + step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent()) } - return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil + return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil } // MakeFilter expands the input call arguments into a comprehension which produces a list which contains // only elements which match the provided predicate expression: // .filter(, ) -func MakeFilter(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func MakeFilter(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - return nil, eh.NewError(args[0].GetId(), "argument is not an identifier") + return nil, eh.NewError(args[0].ID(), "argument is not an identifier") } filter := args[1] - accuExpr := eh.Ident(AccumulatorName) init := eh.NewList() - condition := eh.LiteralBool(true) - step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(args[0])) - step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr) - return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil + condition := eh.NewLiteral(types.True) + step := eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewList(args[0])) + step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent()) + return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil } // MakeHas expands the input call arguments into a presence test, e.g. 
has(.field) -func MakeHas(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { - if s, ok := args[0].ExprKind.(*exprpb.Expr_SelectExpr); ok { - return eh.PresenceTest(s.SelectExpr.GetOperand(), s.SelectExpr.GetField()), nil +func MakeHas(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { + if args[0].Kind() == ast.SelectKind { + s := args[0].AsSelect() + return eh.NewPresenceTest(s.Operand(), s.FieldName()), nil } - return nil, eh.NewError(args[0].GetId(), "invalid argument to has() macro") + return nil, eh.NewError(args[0].ID(), "invalid argument to has() macro") } -func makeQuantifier(kind quantifierKind, eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func makeQuantifier(kind quantifierKind, eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - return nil, eh.NewError(args[0].GetId(), "argument must be a simple name") + return nil, eh.NewError(args[0].ID(), "argument must be a simple name") } - var init *exprpb.Expr - var condition *exprpb.Expr - var step *exprpb.Expr - var result *exprpb.Expr + var init ast.Expr + var condition ast.Expr + var step ast.Expr + var result ast.Expr switch kind { case quantifierAll: - init = eh.LiteralBool(true) - condition = eh.GlobalCall(operators.NotStrictlyFalse, eh.AccuIdent()) - step = eh.GlobalCall(operators.LogicalAnd, eh.AccuIdent(), args[1]) - result = eh.AccuIdent() + init = eh.NewLiteral(types.True) + condition = eh.NewCall(operators.NotStrictlyFalse, eh.NewAccuIdent()) + step = eh.NewCall(operators.LogicalAnd, eh.NewAccuIdent(), args[1]) + result = eh.NewAccuIdent() case quantifierExists: - init = eh.LiteralBool(false) - condition = eh.GlobalCall( + init = eh.NewLiteral(types.False) + condition = eh.NewCall( operators.NotStrictlyFalse, - eh.GlobalCall(operators.LogicalNot, eh.AccuIdent())) - step = eh.GlobalCall(operators.LogicalOr, eh.AccuIdent(), args[1]) - result = eh.AccuIdent() + eh.NewCall(operators.LogicalNot, eh.NewAccuIdent())) + step = eh.NewCall(operators.LogicalOr, eh.NewAccuIdent(), args[1]) + result = eh.NewAccuIdent() case quantifierExistsOne: - zeroExpr := eh.LiteralInt(0) - oneExpr := eh.LiteralInt(1) - init = zeroExpr - condition = eh.LiteralBool(true) - step = eh.GlobalCall(operators.Conditional, args[1], - eh.GlobalCall(operators.Add, eh.AccuIdent(), oneExpr), eh.AccuIdent()) - result = eh.GlobalCall(operators.Equals, eh.AccuIdent(), oneExpr) + init = eh.NewLiteral(types.Int(0)) + condition = eh.NewLiteral(types.True) + step = eh.NewCall(operators.Conditional, args[1], + eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1))), eh.NewAccuIdent()) + result = eh.NewCall(operators.Equals, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1))) default: - return nil, eh.NewError(args[0].GetId(), fmt.Sprintf("unrecognized quantifier '%v'", kind)) + return nil, eh.NewError(args[0].ID(), fmt.Sprintf("unrecognized quantifier '%v'", kind)) } - return eh.Fold(v, target, AccumulatorName, init, condition, step, result), nil + return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, result), nil } -func extractIdent(e *exprpb.Expr) (string, bool) { - switch e.ExprKind.(type) { - case *exprpb.Expr_IdentExpr: - return e.GetIdentExpr().GetName(), true +func extractIdent(e ast.Expr) (string, bool) { + switch e.Kind() { + case ast.IdentKind: + return e.AsIdent(), true } return "", false } diff --git 
a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go index 109326a939..5cbb176727 100644 --- a/vendor/github.com/google/cel-go/parser/parser.go +++ b/vendor/github.com/google/cel-go/parser/parser.go @@ -21,17 +21,15 @@ import ( "regexp" "strconv" "strings" - "sync" - antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4" + antlr "github.com/antlr4-go/antlr/v4" "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/runes" + "github.com/google/cel-go/common/types" "github.com/google/cel-go/parser/gen" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - structpb "google.golang.org/protobuf/types/known/structpb" ) // Parser encapsulates the context necessary to perform parsing for different expressions. @@ -88,11 +86,13 @@ func mustNewParser(opts ...Option) *Parser { } // Parse parses the expression represented by source and returns the result. -func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) { +func (p *Parser) Parse(source common.Source) (*ast.AST, *common.Errors) { errs := common.NewErrors(source) + fac := ast.NewExprFactory() impl := parser{ errors: &parseErrors{errs}, - helper: newParserHelper(source), + exprFactory: fac, + helper: newParserHelper(source, fac), macros: p.macros, maxRecursionDepth: p.maxRecursionDepth, errorReportingLimit: p.errorReportingLimit, @@ -106,18 +106,15 @@ func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors if !ok { buf = runes.NewBuffer(source.Content()) } - var e *exprpb.Expr + var out ast.Expr if buf.Len() > p.expressionSizeCodePointLimit { - e = impl.reportError(common.NoLocation, + out = impl.reportError(common.NoLocation, "expression code point size exceeds limit: size: %d, limit %d", buf.Len(), p.expressionSizeCodePointLimit) } else { - e = impl.parse(buf, source.Description()) + out = impl.parse(buf, source.Description()) } - return &exprpb.ParsedExpr{ - Expr: e, - SourceInfo: impl.helper.getSourceInfo(), - }, errs + return ast.NewAST(out, impl.helper.getSourceInfo()), errs } // reservedIds are not legal to use as variables. We exclude them post-parse, as they *are* valid @@ -150,7 +147,7 @@ var reservedIds = map[string]struct{}{ // This function calls ParseWithMacros with AllMacros. // // Deprecated: Use NewParser().Parse() instead. 
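
Note: following the deprecation comment above, a sketch of what the non-deprecated path looks like to callers after this change; the main difference is the *ast.AST return value (assumes the NewParser constructor and Macros option referenced elsewhere in this diff):

package main

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/parser"
)

func main() {
	p, err := parser.NewParser(parser.Macros(parser.AllMacros...))
	if err != nil {
		panic(err)
	}
	// Parse now returns *ast.AST rather than *exprpb.ParsedExpr.
	parsed, iss := p.Parse(common.NewTextSource(`has(msg.field) && [1, 2, 3].exists(i, i > 2)`))
	if len(iss.GetErrors()) > 0 {
		panic(iss.ToDisplayString())
	}
	fmt.Println(parsed.Expr().Kind() == ast.CallKind) // true: the root is the '&&' call
}
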
-func Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) { +func Parse(source common.Source) (*ast.AST, *common.Errors) { return mustNewParser(Macros(AllMacros...)).Parse(source) } @@ -287,6 +284,7 @@ var _ antlr.ErrorStrategy = &recoveryLimitErrorStrategy{} type parser struct { gen.BaseCELVisitor errors *parseErrors + exprFactory ast.ExprFactory helper *parserHelper macros map[string]Macro recursionDepth int @@ -300,53 +298,21 @@ type parser struct { enableVariadicOperatorASTs bool } -var ( - _ gen.CELVisitor = (*parser)(nil) - - lexerPool *sync.Pool = &sync.Pool{ - New: func() any { - l := gen.NewCELLexer(nil) - l.RemoveErrorListeners() - return l - }, - } +var _ gen.CELVisitor = (*parser)(nil) - parserPool *sync.Pool = &sync.Pool{ - New: func() any { - p := gen.NewCELParser(nil) - p.RemoveErrorListeners() - return p - }, - } -) +func (p *parser) parse(expr runes.Buffer, desc string) ast.Expr { + lexer := gen.NewCELLexer(newCharStream(expr, desc)) + lexer.RemoveErrorListeners() + lexer.AddErrorListener(p) -func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr { - // TODO: get rid of these pools once https://github.com/antlr/antlr4/pull/3571 is in a release - lexer := lexerPool.Get().(*gen.CELLexer) - prsr := parserPool.Get().(*gen.CELParser) + prsr := gen.NewCELParser(antlr.NewCommonTokenStream(lexer, 0)) + prsr.RemoveErrorListeners() prsrListener := &recursionListener{ maxDepth: p.maxRecursionDepth, ruleTypeDepth: map[int]*int{}, } - defer func() { - // Unfortunately ANTLR Go runtime is missing (*antlr.BaseParser).RemoveParseListeners, - // so this is good enough until that is exported. - // Reset the lexer and parser before putting them back in the pool. - lexer.RemoveErrorListeners() - prsr.RemoveParseListener(prsrListener) - prsr.RemoveErrorListeners() - lexer.SetInputStream(nil) - prsr.SetInputStream(nil) - lexerPool.Put(lexer) - parserPool.Put(prsr) - }() - - lexer.SetInputStream(newCharStream(expr, desc)) - prsr.SetInputStream(antlr.NewCommonTokenStream(lexer, 0)) - - lexer.AddErrorListener(p) prsr.AddErrorListener(p) prsr.AddParseListener(prsrListener) @@ -373,7 +339,7 @@ func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr { } }() - return p.Visit(prsr.Start()).(*exprpb.Expr) + return p.Visit(prsr.Start_()).(ast.Expr) } // Visitor implementations. @@ -470,26 +436,26 @@ func (p *parser) VisitStart(ctx *gen.StartContext) any { // Visit a parse tree produced by CELParser#expr. func (p *parser) VisitExpr(ctx *gen.ExprContext) any { - result := p.Visit(ctx.GetE()).(*exprpb.Expr) + result := p.Visit(ctx.GetE()).(ast.Expr) if ctx.GetOp() == nil { return result } opID := p.helper.id(ctx.GetOp()) - ifTrue := p.Visit(ctx.GetE1()).(*exprpb.Expr) - ifFalse := p.Visit(ctx.GetE2()).(*exprpb.Expr) + ifTrue := p.Visit(ctx.GetE1()).(ast.Expr) + ifFalse := p.Visit(ctx.GetE2()).(ast.Expr) return p.globalCallOrMacro(opID, operators.Conditional, result, ifTrue, ifFalse) } // Visit a parse tree produced by CELParser#conditionalOr. 
func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any { - result := p.Visit(ctx.GetE()).(*exprpb.Expr) + result := p.Visit(ctx.GetE()).(ast.Expr) l := p.newLogicManager(operators.LogicalOr, result) rest := ctx.GetE1() for i, op := range ctx.GetOps() { if i >= len(rest) { return p.reportError(ctx, "unexpected character, wanted '||'") } - next := p.Visit(rest[i]).(*exprpb.Expr) + next := p.Visit(rest[i]).(ast.Expr) opID := p.helper.id(op) l.addTerm(opID, next) } @@ -498,14 +464,14 @@ func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any { // Visit a parse tree produced by CELParser#conditionalAnd. func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any { - result := p.Visit(ctx.GetE()).(*exprpb.Expr) + result := p.Visit(ctx.GetE()).(ast.Expr) l := p.newLogicManager(operators.LogicalAnd, result) rest := ctx.GetE1() for i, op := range ctx.GetOps() { if i >= len(rest) { return p.reportError(ctx, "unexpected character, wanted '&&'") } - next := p.Visit(rest[i]).(*exprpb.Expr) + next := p.Visit(rest[i]).(ast.Expr) opID := p.helper.id(op) l.addTerm(opID, next) } @@ -519,9 +485,9 @@ func (p *parser) VisitRelation(ctx *gen.RelationContext) any { opText = ctx.GetOp().GetText() } if op, found := operators.Find(opText); found { - lhs := p.Visit(ctx.Relation(0)).(*exprpb.Expr) + lhs := p.Visit(ctx.Relation(0)).(ast.Expr) opID := p.helper.id(ctx.GetOp()) - rhs := p.Visit(ctx.Relation(1)).(*exprpb.Expr) + rhs := p.Visit(ctx.Relation(1)).(ast.Expr) return p.globalCallOrMacro(opID, op, lhs, rhs) } return p.reportError(ctx, "operator not found") @@ -534,9 +500,9 @@ func (p *parser) VisitCalc(ctx *gen.CalcContext) any { opText = ctx.GetOp().GetText() } if op, found := operators.Find(opText); found { - lhs := p.Visit(ctx.Calc(0)).(*exprpb.Expr) + lhs := p.Visit(ctx.Calc(0)).(ast.Expr) opID := p.helper.id(ctx.GetOp()) - rhs := p.Visit(ctx.Calc(1)).(*exprpb.Expr) + rhs := p.Visit(ctx.Calc(1)).(ast.Expr) return p.globalCallOrMacro(opID, op, lhs, rhs) } return p.reportError(ctx, "operator not found") @@ -552,7 +518,7 @@ func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) any { return p.Visit(ctx.Member()) } opID := p.helper.id(ctx.GetOps()[0]) - target := p.Visit(ctx.Member()).(*exprpb.Expr) + target := p.Visit(ctx.Member()).(ast.Expr) return p.globalCallOrMacro(opID, operators.LogicalNot, target) } @@ -561,13 +527,13 @@ func (p *parser) VisitNegate(ctx *gen.NegateContext) any { return p.Visit(ctx.Member()) } opID := p.helper.id(ctx.GetOps()[0]) - target := p.Visit(ctx.Member()).(*exprpb.Expr) + target := p.Visit(ctx.Member()).(ast.Expr) return p.globalCallOrMacro(opID, operators.Negate, target) } // VisitSelect visits a parse tree produced by CELParser#Select. func (p *parser) VisitSelect(ctx *gen.SelectContext) any { - operand := p.Visit(ctx.Member()).(*exprpb.Expr) + operand := p.Visit(ctx.Member()).(ast.Expr) // Handle the error case where no valid identifier is specified. if ctx.GetId() == nil || ctx.GetOp() == nil { return p.helper.newExpr(ctx) @@ -588,7 +554,7 @@ func (p *parser) VisitSelect(ctx *gen.SelectContext) any { // VisitMemberCall visits a parse tree produced by CELParser#MemberCall. func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any { - operand := p.Visit(ctx.Member()).(*exprpb.Expr) + operand := p.Visit(ctx.Member()).(ast.Expr) // Handle the error case where no valid identifier is specified. 
if ctx.GetId() == nil { return p.helper.newExpr(ctx) @@ -600,13 +566,13 @@ func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any { // Visit a parse tree produced by CELParser#Index. func (p *parser) VisitIndex(ctx *gen.IndexContext) any { - target := p.Visit(ctx.Member()).(*exprpb.Expr) + target := p.Visit(ctx.Member()).(ast.Expr) // Handle the error case where no valid identifier is specified. if ctx.GetOp() == nil { return p.helper.newExpr(ctx) } opID := p.helper.id(ctx.GetOp()) - index := p.Visit(ctx.GetIndex()).(*exprpb.Expr) + index := p.Visit(ctx.GetIndex()).(ast.Expr) operator := operators.Index if ctx.GetOpt() != nil { if !p.enableOptionalSyntax { @@ -630,7 +596,7 @@ func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any { messageName = "." + messageName } objID := p.helper.id(ctx.GetOp()) - entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry) + entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]ast.EntryExpr) return p.helper.newObject(objID, messageName, entries...) } @@ -638,16 +604,16 @@ func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any { func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) any { if ctx == nil || ctx.GetFields() == nil { // This is the result of a syntax error handled elswhere, return empty. - return []*exprpb.Expr_CreateStruct_Entry{} + return []ast.EntryExpr{} } - result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetFields())) + result := make([]ast.EntryExpr, len(ctx.GetFields())) cols := ctx.GetCols() vals := ctx.GetValues() for i, f := range ctx.GetFields() { if i >= len(cols) || i >= len(vals) { // This is the result of a syntax error detected elsewhere. - return []*exprpb.Expr_CreateStruct_Entry{} + return []ast.EntryExpr{} } initID := p.helper.id(cols[i]) optField := f.(*gen.OptFieldContext) @@ -659,10 +625,10 @@ func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext // The field may be empty due to a prior error. id := optField.IDENTIFIER() if id == nil { - return []*exprpb.Expr_CreateStruct_Entry{} + return []ast.EntryExpr{} } fieldName := id.GetText() - value := p.Visit(vals[i]).(*exprpb.Expr) + value := p.Visit(vals[i]).(ast.Expr) field := p.helper.newObjectField(initID, fieldName, value, optional) result[i] = field } @@ -702,9 +668,9 @@ func (p *parser) VisitCreateList(ctx *gen.CreateListContext) any { // Visit a parse tree produced by CELParser#CreateStruct. func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any { structID := p.helper.id(ctx.GetOp()) - entries := []*exprpb.Expr_CreateStruct_Entry{} + entries := []ast.EntryExpr{} if ctx.GetEntries() != nil { - entries = p.Visit(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry) + entries = p.Visit(ctx.GetEntries()).([]ast.EntryExpr) } return p.helper.newMap(structID, entries...) } @@ -713,17 +679,17 @@ func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any { func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any { if ctx == nil || ctx.GetKeys() == nil { // This is the result of a syntax error handled elswhere, return empty. 
- return []*exprpb.Expr_CreateStruct_Entry{} + return []ast.EntryExpr{} } - result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetCols())) + result := make([]ast.EntryExpr, len(ctx.GetCols())) keys := ctx.GetKeys() vals := ctx.GetValues() for i, col := range ctx.GetCols() { colID := p.helper.id(col) if i >= len(keys) || i >= len(vals) { // This is the result of a syntax error detected elsewhere. - return []*exprpb.Expr_CreateStruct_Entry{} + return []ast.EntryExpr{} } optKey := keys[i] optional := optKey.GetOpt() != nil @@ -731,8 +697,8 @@ func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any p.reportError(optKey, "unsupported syntax '?'") continue } - key := p.Visit(optKey.GetE()).(*exprpb.Expr) - value := p.Visit(vals[i]).(*exprpb.Expr) + key := p.Visit(optKey.GetE()).(ast.Expr) + value := p.Visit(vals[i]).(ast.Expr) entry := p.helper.newMapEntry(colID, key, value, optional) result[i] = entry } @@ -812,30 +778,27 @@ func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) any { // Visit a parse tree produced by CELParser#Null. func (p *parser) VisitNull(ctx *gen.NullContext) any { - return p.helper.newLiteral(ctx, - &exprpb.Constant{ - ConstantKind: &exprpb.Constant_NullValue{ - NullValue: structpb.NullValue_NULL_VALUE}}) + return p.helper.exprFactory.NewLiteral(p.helper.newID(ctx), types.NullValue) } -func (p *parser) visitExprList(ctx gen.IExprListContext) []*exprpb.Expr { +func (p *parser) visitExprList(ctx gen.IExprListContext) []ast.Expr { if ctx == nil { - return []*exprpb.Expr{} + return []ast.Expr{} } return p.visitSlice(ctx.GetE()) } -func (p *parser) visitListInit(ctx gen.IListInitContext) ([]*exprpb.Expr, []int32) { +func (p *parser) visitListInit(ctx gen.IListInitContext) ([]ast.Expr, []int32) { if ctx == nil { - return []*exprpb.Expr{}, []int32{} + return []ast.Expr{}, []int32{} } elements := ctx.GetElems() - result := make([]*exprpb.Expr, len(elements)) + result := make([]ast.Expr, len(elements)) optionals := []int32{} for i, e := range elements { - ex := p.Visit(e.GetE()).(*exprpb.Expr) + ex := p.Visit(e.GetE()).(ast.Expr) if ex == nil { - return []*exprpb.Expr{}, []int32{} + return []ast.Expr{}, []int32{} } result[i] = ex if e.GetOpt() != nil { @@ -849,13 +812,13 @@ func (p *parser) visitListInit(ctx gen.IListInitContext) ([]*exprpb.Expr, []int3 return result, optionals } -func (p *parser) visitSlice(expressions []gen.IExprContext) []*exprpb.Expr { +func (p *parser) visitSlice(expressions []gen.IExprContext) []ast.Expr { if expressions == nil { - return []*exprpb.Expr{} + return []ast.Expr{} } - result := make([]*exprpb.Expr, len(expressions)) + result := make([]ast.Expr, len(expressions)) for i, e := range expressions { - ex := p.Visit(e).(*exprpb.Expr) + ex := p.Visit(e).(ast.Expr) result[i] = ex } return result @@ -870,30 +833,31 @@ func (p *parser) unquote(ctx any, value string, isBytes bool) string { return text } -func (p *parser) newLogicManager(function string, term *exprpb.Expr) *logicManager { +func (p *parser) newLogicManager(function string, term ast.Expr) *logicManager { if p.enableVariadicOperatorASTs { - return newVariadicLogicManager(p.helper, function, term) + return newVariadicLogicManager(p.exprFactory, function, term) } - return newBalancingLogicManager(p.helper, function, term) + return newBalancingLogicManager(p.exprFactory, function, term) } -func (p *parser) reportError(ctx any, format string, args ...any) *exprpb.Expr { +func (p *parser) reportError(ctx any, format string, args ...any) ast.Expr { var location 
common.Location err := p.helper.newExpr(ctx) switch c := ctx.(type) { case common.Location: location = c case antlr.Token, antlr.ParserRuleContext: - location = p.helper.getLocation(err.GetId()) + location = p.helper.getLocation(err.ID()) } // Provide arguments to the report error. - p.errors.reportErrorAtID(err.GetId(), location, format, args...) + p.errors.reportErrorAtID(err.ID(), location, format, args...) return err } // ANTLR Parse listener implementations func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) { - l := p.helper.source.NewLocation(line, column) + offset := p.helper.sourceInfo.ComputeOffset(int32(line), int32(column)) + l := p.helper.getLocationByOffset(offset) // Hack to keep existing error messages consistent with previous versions of CEL when a reserved word // is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error // messages out of ANTLR to prevent future breaking changes related to error message content. @@ -912,33 +876,33 @@ func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, l } } -func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs antlr.ATNConfigSet) { +func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs *antlr.ATNConfigSet) { // Intentional } -func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs antlr.ATNConfigSet) { +func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs *antlr.ATNConfigSet) { // Intentional } -func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs antlr.ATNConfigSet) { +func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs *antlr.ATNConfigSet) { // Intentional } -func (p *parser) globalCallOrMacro(exprID int64, function string, args ...*exprpb.Expr) *exprpb.Expr { +func (p *parser) globalCallOrMacro(exprID int64, function string, args ...ast.Expr) ast.Expr { if expr, found := p.expandMacro(exprID, function, nil, args...); found { return expr } return p.helper.newGlobalCall(exprID, function, args...) } -func (p *parser) receiverCallOrMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr { +func (p *parser) receiverCallOrMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) ast.Expr { if expr, found := p.expandMacro(exprID, function, target, args...); found { return expr } return p.helper.newReceiverCall(exprID, function, target, args...) 
} -func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) (*exprpb.Expr, bool) { +func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) (ast.Expr, bool) { macro, found := p.macros[makeMacroKey(function, len(args), target != nil)] if !found { macro, found = p.macros[makeVarArgMacroKey(function, target != nil)] @@ -953,10 +917,12 @@ func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr, expr, err := macro.Expander()(eh, target, args) // An error indicates that the macro was matched, but the arguments were not well-formed. if err != nil { - if err.Location != nil { - return p.reportError(err.Location, err.Message), true + loc := err.Location + if loc == nil { + loc = p.helper.getLocation(exprID) } - return p.reportError(p.helper.getLocation(exprID), err.Message), true + p.helper.deleteID(exprID) + return p.reportError(loc, err.Message), true } // A nil value from the macro indicates that the macro implementation decided that // an expansion should not be performed. @@ -964,8 +930,9 @@ func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr, return nil, false } if p.populateMacroCalls { - p.helper.addMacroCall(expr.GetId(), function, target, args...) + p.helper.addMacroCall(expr.ID(), function, target, args...) } + p.helper.deleteID(exprID) return expr, true } diff --git a/vendor/github.com/google/cel-go/parser/unparser.go b/vendor/github.com/google/cel-go/parser/unparser.go index c3c40a0dd3..91cf729447 100644 --- a/vendor/github.com/google/cel-go/parser/unparser.go +++ b/vendor/github.com/google/cel-go/parser/unparser.go @@ -20,9 +20,9 @@ import ( "strconv" "strings" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/operators" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/types" ) // Unparse takes an input expression and source position information and generates a human-readable @@ -39,7 +39,7 @@ import ( // // This function optionally takes in one or more UnparserOption to alter the unparsing behavior, such as // performing word wrapping on expressions. -func Unparse(expr *exprpb.Expr, info *exprpb.SourceInfo, opts ...UnparserOption) (string, error) { +func Unparse(expr ast.Expr, info *ast.SourceInfo, opts ...UnparserOption) (string, error) { unparserOpts := &unparserOption{ wrapOnColumn: defaultWrapOnColumn, wrapAfterColumnLimit: defaultWrapAfterColumnLimit, @@ -68,12 +68,12 @@ func Unparse(expr *exprpb.Expr, info *exprpb.SourceInfo, opts ...UnparserOption) // unparser visits an expression to reconstruct a human-readable string from an AST. 
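
Note: Unparse now takes the native ast.Expr and *ast.SourceInfo pair produced by the parser. A round-trip sketch, assuming the parser's PopulateMacroCalls option (not shown in this hunk) so that macro metadata is available for visitMaybeMacroCall:

package main

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/parser"
)

func main() {
	// PopulateMacroCalls is assumed here; it fills the SourceInfo macro map that
	// the unparser consults so macros render back as written.
	p, err := parser.NewParser(parser.Macros(parser.AllMacros...), parser.PopulateMacroCalls(true))
	if err != nil {
		panic(err)
	}
	parsed, iss := p.Parse(common.NewTextSource(`items.filter(i, i.size() > 0)`))
	if len(iss.GetErrors()) > 0 {
		panic(iss.ToDisplayString())
	}
	// Unparse consumes the native ast types directly instead of the protobuf ones.
	out, err := parser.Unparse(parsed.Expr(), parsed.SourceInfo())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // items.filter(i, i.size() > 0)
}
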
type unparser struct { str strings.Builder - info *exprpb.SourceInfo + info *ast.SourceInfo options *unparserOption lastWrappedIndex int } -func (un *unparser) visit(expr *exprpb.Expr) error { +func (un *unparser) visit(expr ast.Expr) error { if expr == nil { return errors.New("unsupported expression") } @@ -81,27 +81,29 @@ func (un *unparser) visit(expr *exprpb.Expr) error { if visited || err != nil { return err } - switch expr.GetExprKind().(type) { - case *exprpb.Expr_CallExpr: + switch expr.Kind() { + case ast.CallKind: return un.visitCall(expr) - case *exprpb.Expr_ConstExpr: + case ast.LiteralKind: return un.visitConst(expr) - case *exprpb.Expr_IdentExpr: + case ast.IdentKind: return un.visitIdent(expr) - case *exprpb.Expr_ListExpr: + case ast.ListKind: return un.visitList(expr) - case *exprpb.Expr_SelectExpr: + case ast.MapKind: + return un.visitStructMap(expr) + case ast.SelectKind: return un.visitSelect(expr) - case *exprpb.Expr_StructExpr: - return un.visitStruct(expr) + case ast.StructKind: + return un.visitStructMsg(expr) default: return fmt.Errorf("unsupported expression: %v", expr) } } -func (un *unparser) visitCall(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() +func (un *unparser) visitCall(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() switch fun { // ternary operator case operators.Conditional: @@ -141,10 +143,10 @@ func (un *unparser) visitCall(expr *exprpb.Expr) error { } } -func (un *unparser) visitCallBinary(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() - args := c.GetArgs() +func (un *unparser) visitCallBinary(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() lhs := args[0] // add parens if the current operator is lower precedence than the lhs expr operator. lhsParen := isComplexOperatorWithRespectTo(fun, lhs) @@ -168,9 +170,9 @@ func (un *unparser) visitCallBinary(expr *exprpb.Expr) error { return un.visitMaybeNested(rhs, rhsParen) } -func (un *unparser) visitCallConditional(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - args := c.GetArgs() +func (un *unparser) visitCallConditional(expr ast.Expr) error { + c := expr.AsCall() + args := c.Args() // add parens if operand is a conditional itself. 
nested := isSamePrecedence(operators.Conditional, args[0]) || isComplexOperator(args[0]) @@ -196,13 +198,13 @@ func (un *unparser) visitCallConditional(expr *exprpb.Expr) error { return un.visitMaybeNested(args[2], nested) } -func (un *unparser) visitCallFunc(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() - args := c.GetArgs() - if c.GetTarget() != nil { - nested := isBinaryOrTernaryOperator(c.GetTarget()) - err := un.visitMaybeNested(c.GetTarget(), nested) +func (un *unparser) visitCallFunc(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() + if c.IsMemberFunction() { + nested := isBinaryOrTernaryOperator(c.Target()) + err := un.visitMaybeNested(c.Target(), nested) if err != nil { return err } @@ -223,17 +225,17 @@ func (un *unparser) visitCallFunc(expr *exprpb.Expr) error { return nil } -func (un *unparser) visitCallIndex(expr *exprpb.Expr) error { +func (un *unparser) visitCallIndex(expr ast.Expr) error { return un.visitCallIndexInternal(expr, "[") } -func (un *unparser) visitCallOptIndex(expr *exprpb.Expr) error { +func (un *unparser) visitCallOptIndex(expr ast.Expr) error { return un.visitCallIndexInternal(expr, "[?") } -func (un *unparser) visitCallIndexInternal(expr *exprpb.Expr, op string) error { - c := expr.GetCallExpr() - args := c.GetArgs() +func (un *unparser) visitCallIndexInternal(expr ast.Expr, op string) error { + c := expr.AsCall() + args := c.Args() nested := isBinaryOrTernaryOperator(args[0]) err := un.visitMaybeNested(args[0], nested) if err != nil { @@ -248,10 +250,10 @@ func (un *unparser) visitCallIndexInternal(expr *exprpb.Expr, op string) error { return nil } -func (un *unparser) visitCallUnary(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() - args := c.GetArgs() +func (un *unparser) visitCallUnary(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() unmangled, found := operators.FindReverse(fun) if !found { return fmt.Errorf("cannot unmangle operator: %s", fun) @@ -261,35 +263,34 @@ func (un *unparser) visitCallUnary(expr *exprpb.Expr) error { return un.visitMaybeNested(args[0], nested) } -func (un *unparser) visitConst(expr *exprpb.Expr) error { - c := expr.GetConstExpr() - switch c.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - un.str.WriteString(strconv.FormatBool(c.GetBoolValue())) - case *exprpb.Constant_BytesValue: +func (un *unparser) visitConst(expr ast.Expr) error { + val := expr.AsLiteral() + switch val := val.(type) { + case types.Bool: + un.str.WriteString(strconv.FormatBool(bool(val))) + case types.Bytes: // bytes constants are surrounded with b"" - b := c.GetBytesValue() un.str.WriteString(`b"`) - un.str.WriteString(bytesToOctets(b)) + un.str.WriteString(bytesToOctets([]byte(val))) un.str.WriteString(`"`) - case *exprpb.Constant_DoubleValue: + case types.Double: // represent the float using the minimum required digits - d := strconv.FormatFloat(c.GetDoubleValue(), 'g', -1, 64) + d := strconv.FormatFloat(float64(val), 'g', -1, 64) un.str.WriteString(d) if !strings.Contains(d, ".") { un.str.WriteString(".0") } - case *exprpb.Constant_Int64Value: - i := strconv.FormatInt(c.GetInt64Value(), 10) + case types.Int: + i := strconv.FormatInt(int64(val), 10) un.str.WriteString(i) - case *exprpb.Constant_NullValue: + case types.Null: un.str.WriteString("null") - case *exprpb.Constant_StringValue: + case types.String: // strings will be double quoted with quotes escaped. 
- un.str.WriteString(strconv.Quote(c.GetStringValue())) - case *exprpb.Constant_Uint64Value: + un.str.WriteString(strconv.Quote(string(val))) + case types.Uint: // uint literals have a 'u' suffix. - ui := strconv.FormatUint(c.GetUint64Value(), 10) + ui := strconv.FormatUint(uint64(val), 10) un.str.WriteString(ui) un.str.WriteString("u") default: @@ -298,16 +299,16 @@ func (un *unparser) visitConst(expr *exprpb.Expr) error { return nil } -func (un *unparser) visitIdent(expr *exprpb.Expr) error { - un.str.WriteString(expr.GetIdentExpr().GetName()) +func (un *unparser) visitIdent(expr ast.Expr) error { + un.str.WriteString(expr.AsIdent()) return nil } -func (un *unparser) visitList(expr *exprpb.Expr) error { - l := expr.GetListExpr() - elems := l.GetElements() +func (un *unparser) visitList(expr ast.Expr) error { + l := expr.AsList() + elems := l.Elements() optIndices := make(map[int]bool, len(elems)) - for _, idx := range l.GetOptionalIndices() { + for _, idx := range l.OptionalIndices() { optIndices[int(idx)] = true } un.str.WriteString("[") @@ -327,20 +328,20 @@ func (un *unparser) visitList(expr *exprpb.Expr) error { return nil } -func (un *unparser) visitOptSelect(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - args := c.GetArgs() +func (un *unparser) visitOptSelect(expr ast.Expr) error { + c := expr.AsCall() + args := c.Args() operand := args[0] - field := args[1].GetConstExpr().GetStringValue() - return un.visitSelectInternal(operand, false, ".?", field) + field := args[1].AsLiteral().(types.String) + return un.visitSelectInternal(operand, false, ".?", string(field)) } -func (un *unparser) visitSelect(expr *exprpb.Expr) error { - sel := expr.GetSelectExpr() - return un.visitSelectInternal(sel.GetOperand(), sel.GetTestOnly(), ".", sel.GetField()) +func (un *unparser) visitSelect(expr ast.Expr) error { + sel := expr.AsSelect() + return un.visitSelectInternal(sel.Operand(), sel.IsTestOnly(), ".", sel.FieldName()) } -func (un *unparser) visitSelectInternal(operand *exprpb.Expr, testOnly bool, op string, field string) error { +func (un *unparser) visitSelectInternal(operand ast.Expr, testOnly bool, op string, field string) error { // handle the case when the select expression was generated by the has() macro. if testOnly { un.str.WriteString("has(") @@ -358,34 +359,25 @@ func (un *unparser) visitSelectInternal(operand *exprpb.Expr, testOnly bool, op return nil } -func (un *unparser) visitStruct(expr *exprpb.Expr) error { - s := expr.GetStructExpr() - // If the message name is non-empty, then this should be treated as message construction. - if s.GetMessageName() != "" { - return un.visitStructMsg(expr) - } - // Otherwise, build a map. 
- return un.visitStructMap(expr) -} - -func (un *unparser) visitStructMsg(expr *exprpb.Expr) error { - m := expr.GetStructExpr() - entries := m.GetEntries() - un.str.WriteString(m.GetMessageName()) +func (un *unparser) visitStructMsg(expr ast.Expr) error { + m := expr.AsStruct() + fields := m.Fields() + un.str.WriteString(m.TypeName()) un.str.WriteString("{") - for i, entry := range entries { - f := entry.GetFieldKey() - if entry.GetOptionalEntry() { + for i, f := range fields { + field := f.AsStructField() + f := field.Name() + if field.IsOptional() { un.str.WriteString("?") } un.str.WriteString(f) un.str.WriteString(": ") - v := entry.GetValue() + v := field.Value() err := un.visit(v) if err != nil { return err } - if i < len(entries)-1 { + if i < len(fields)-1 { un.str.WriteString(", ") } } @@ -393,13 +385,14 @@ func (un *unparser) visitStructMsg(expr *exprpb.Expr) error { return nil } -func (un *unparser) visitStructMap(expr *exprpb.Expr) error { - m := expr.GetStructExpr() - entries := m.GetEntries() +func (un *unparser) visitStructMap(expr ast.Expr) error { + m := expr.AsMap() + entries := m.Entries() un.str.WriteString("{") - for i, entry := range entries { - k := entry.GetMapKey() - if entry.GetOptionalEntry() { + for i, e := range entries { + entry := e.AsMapEntry() + k := entry.Key() + if entry.IsOptional() { un.str.WriteString("?") } err := un.visit(k) @@ -407,7 +400,7 @@ func (un *unparser) visitStructMap(expr *exprpb.Expr) error { return err } un.str.WriteString(": ") - v := entry.GetValue() + v := entry.Value() err = un.visit(v) if err != nil { return err @@ -420,16 +413,15 @@ func (un *unparser) visitStructMap(expr *exprpb.Expr) error { return nil } -func (un *unparser) visitMaybeMacroCall(expr *exprpb.Expr) (bool, error) { - macroCalls := un.info.GetMacroCalls() - call, found := macroCalls[expr.GetId()] +func (un *unparser) visitMaybeMacroCall(expr ast.Expr) (bool, error) { + call, found := un.info.GetMacroCall(expr.ID()) if !found { return false, nil } return true, un.visit(call) } -func (un *unparser) visitMaybeNested(expr *exprpb.Expr, nested bool) error { +func (un *unparser) visitMaybeNested(expr ast.Expr, nested bool) error { if nested { un.str.WriteString("(") } @@ -453,12 +445,12 @@ func isLeftRecursive(op string) bool { // precedence of the (possible) operation represented in the input Expr. // // If the expr is not a Call, the result is false. -func isSamePrecedence(op string, expr *exprpb.Expr) bool { - if expr.GetCallExpr() == nil { +func isSamePrecedence(op string, expr ast.Expr) bool { + if expr.Kind() != ast.CallKind { return false } - c := expr.GetCallExpr() - other := c.GetFunction() + c := expr.AsCall() + other := c.FunctionName() return operators.Precedence(op) == operators.Precedence(other) } @@ -466,16 +458,16 @@ func isSamePrecedence(op string, expr *exprpb.Expr) bool { // than the (possible) operation represented in the input Expr. // // If the expr is not a Call, the result is false. -func isLowerPrecedence(op string, expr *exprpb.Expr) bool { - c := expr.GetCallExpr() - other := c.GetFunction() +func isLowerPrecedence(op string, expr ast.Expr) bool { + c := expr.AsCall() + other := c.FunctionName() return operators.Precedence(op) < operators.Precedence(other) } // Indicates whether the expr is a complex operator, i.e., a call expression // with 2 or more arguments. 
-func isComplexOperator(expr *exprpb.Expr) bool { - if expr.GetCallExpr() != nil && len(expr.GetCallExpr().GetArgs()) >= 2 { +func isComplexOperator(expr ast.Expr) bool { + if expr.Kind() == ast.CallKind && len(expr.AsCall().Args()) >= 2 { return true } return false @@ -484,19 +476,19 @@ func isComplexOperator(expr *exprpb.Expr) bool { // Indicates whether it is a complex operation compared to another. // expr is *not* considered complex if it is not a call expression or has // less than two arguments, or if it has a higher precedence than op. -func isComplexOperatorWithRespectTo(op string, expr *exprpb.Expr) bool { - if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 { +func isComplexOperatorWithRespectTo(op string, expr ast.Expr) bool { + if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 { return false } return isLowerPrecedence(op, expr) } // Indicate whether this is a binary or ternary operator. -func isBinaryOrTernaryOperator(expr *exprpb.Expr) bool { - if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 { +func isBinaryOrTernaryOperator(expr ast.Expr) bool { + if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 { return false } - _, isBinaryOp := operators.FindReverseBinaryOperator(expr.GetCallExpr().GetFunction()) + _, isBinaryOp := operators.FindReverseBinaryOperator(expr.AsCall().FunctionName()) return isBinaryOp || isSamePrecedence(operators.Conditional, expr) } diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 62df80a556..5551eb0bfa 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -847,7 +847,7 @@ func (p *Profile) HasFileLines() bool { // "[vdso]", [vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" } // Copy makes a fully independent copy of a profile. 
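The unparser changes above track cel-go's migration from the protobuf-backed *exprpb.Expr tree to the common/ast representation, where each node reports its category via Kind() and is narrowed with accessors such as AsCall(), AsIdent(), and AsLiteral() instead of GetExprKind() type switches. As an illustration only (not part of this change), here is a minimal sketch of that dispatch pattern; it assumes the import path github.com/google/cel-go/common/ast and uses only accessors that appear in the diff:

package celsketch

import (
	"fmt"

	"github.com/google/cel-go/common/ast"
)

// describe reports the syntactic category of a CEL expression node using the
// Kind()/As*() accessors that the rewritten unparser switches on.
func describe(e ast.Expr) string {
	switch e.Kind() {
	case ast.CallKind:
		c := e.AsCall()
		return fmt.Sprintf("call to %s with %d args", c.FunctionName(), len(c.Args()))
	case ast.LiteralKind:
		return fmt.Sprintf("literal %v", e.AsLiteral())
	case ast.IdentKind:
		return "identifier " + e.AsIdent()
	case ast.SelectKind:
		return "selection of field " + e.AsSelect().FieldName()
	case ast.ListKind:
		return fmt.Sprintf("list with %d elements", len(e.AsList().Elements()))
	case ast.MapKind:
		return fmt.Sprintf("map with %d entries", len(e.AsMap().Entries()))
	case ast.StructKind:
		return "construction of " + e.AsStruct().TypeName()
	default:
		return "unsupported expression"
	}
}

The same pattern is what lets the unparser above treat map construction and message construction as distinct kinds (ast.MapKind vs. ast.StructKind) rather than inspecting a struct expression's message name.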
diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig deleted file mode 100644 index 2940ec92ac..0000000000 --- a/vendor/github.com/gorilla/websocket/.editorconfig +++ /dev/null @@ -1,20 +0,0 @@ -; https://editorconfig.org/ - -root = true - -[*] -insert_final_newline = true -charset = utf-8 -trim_trailing_whitespace = true -indent_style = space -indent_size = 2 - -[{Makefile,go.mod,go.sum,*.go,.gitmodules}] -indent_style = tab -indent_size = 4 - -[*.md] -indent_size = 4 -trim_trailing_whitespace = false - -eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore index 84039fec68..cd3fcd1ef7 100644 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -1 +1,25 @@ -coverage.coverprofile +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.idea/ +*.iml diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml deleted file mode 100644 index 34882139e1..0000000000 --- a/vendor/github.com/gorilla/websocket/.golangci.yml +++ /dev/null @@ -1,3 +0,0 @@ -run: - skip-dirs: - - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 0000000000..1931f40068 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE index bb9d80bc9b..9171c97225 100644 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -1,27 +1,22 @@ -Copyright (c) 2023 The Gorilla Authors. All rights reserved. +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile deleted file mode 100644 index 603a63f50a..0000000000 --- a/vendor/github.com/gorilla/websocket/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') -GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest - -GO_SEC=$(shell which gosec 2> /dev/null || echo '') -GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest - -GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') -GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest - -.PHONY: golangci-lint -golangci-lint: - $(if $(GO_LINT), ,go install $(GO_LINT_URI)) - @echo "##### Running golangci-lint" - golangci-lint run -v - -.PHONY: gosec -gosec: - $(if $(GO_SEC), ,go install $(GO_SEC_URI)) - @echo "##### Running gosec" - gosec -exclude-dir examples ./... - -.PHONY: govulncheck -govulncheck: - $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) - @echo "##### Running govulncheck" - govulncheck ./... - -.PHONY: verify -verify: golangci-lint gosec govulncheck - -.PHONY: test -test: - @echo "##### Running tests" - go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md
index 1fd5e9c4e7..d33ed7fdd8 100644
--- a/vendor/github.com/gorilla/websocket/README.md
+++ b/vendor/github.com/gorilla/websocket/README.md
@@ -1,13 +1,10 @@
-# gorilla/websocket
+# Gorilla WebSocket
 
-![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg)
-[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket)
-[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
-[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge)
+[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
+[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket)
 
-Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
-
-![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5)
+Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
+[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
 
 ### Documentation
 
@@ -17,7 +14,6 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket
 * [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
 * [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
 * [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
-* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool)
 
 ### Status
 
@@ -34,3 +30,4 @@ package API is stable.
 The Gorilla WebSocket package passes the server tests in the [Autobahn Test
 Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
 subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
+ diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go index 815b0ca5c8..04fdafee18 100644 --- a/vendor/github.com/gorilla/websocket/client.go +++ b/vendor/github.com/gorilla/websocket/client.go @@ -11,16 +11,13 @@ import ( "errors" "fmt" "io" - "log" - + "io/ioutil" "net" "net/http" "net/http/httptrace" "net/url" "strings" "time" - - "golang.org/x/net/proxy" ) // ErrBadHandshake is returned when the server response to opening handshake is @@ -228,7 +225,6 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h k == "Connection" || k == "Sec-Websocket-Key" || k == "Sec-Websocket-Version" || - //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution k == "Sec-Websocket-Extensions" || (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) @@ -294,9 +290,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } err = c.SetDeadline(deadline) if err != nil { - if err := c.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + c.Close() return nil, err } return c, nil @@ -310,7 +304,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h return nil, nil, err } if proxyURL != nil { - dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) if err != nil { return nil, nil, err } @@ -336,9 +330,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h defer func() { if netConn != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + netConn.Close() } }() @@ -408,7 +400,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h // debugging. buf := make([]byte, 1024) n, _ := io.ReadFull(resp.Body, buf) - resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) return nil, resp, ErrBadHandshake } @@ -426,19 +418,17 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h break } - resp.Body = io.NopCloser(bytes.NewReader([]byte{})) + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - if err := netConn.SetDeadline(time.Time{}); err != nil { - return nil, nil, err - } + netConn.SetDeadline(time.Time{}) netConn = nil // to avoid close in defer. 
return conn, resp, nil } func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { - return &tls.Config{MinVersion: tls.VersionTLS12} + return &tls.Config{} } return cfg.Clone() } diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go index 9fed0ef521..813ffb1e84 100644 --- a/vendor/github.com/gorilla/websocket/compression.go +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -8,7 +8,6 @@ import ( "compress/flate" "errors" "io" - "log" "strings" "sync" ) @@ -34,9 +33,7 @@ func decompressNoContextTakeover(r io.Reader) io.ReadCloser { "\x01\x00\x00\xff\xff" fr, _ := flateReaderPool.Get().(io.ReadCloser) - if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { - panic(err) - } + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) return &flateReadWrapper{fr} } @@ -135,9 +132,7 @@ func (r *flateReadWrapper) Read(p []byte) (int, error) { // Preemptively place the reader back in the pool. This helps with // scenarios where the application does not call NextReader() soon after // this final read. - if err := r.Close(); err != nil { - log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) - } + r.Close() } return n, err } diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index 221e6cf798..5161ef81f6 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -6,11 +6,11 @@ package websocket import ( "bufio" - "crypto/rand" "encoding/binary" "errors" "io" - "log" + "io/ioutil" + "math/rand" "net" "strconv" "strings" @@ -181,20 +181,13 @@ var ( errInvalidControlFrame = errors.New("websocket: invalid control frame") ) -// maskRand is an io.Reader for generating mask bytes. The reader is initialized -// to crypto/rand Reader. Tests swap the reader to a math/rand reader for -// reproducible results. -var maskRand = rand.Reader - -// newMaskKey returns a new 32 bit value for masking client frames. 
func newMaskKey() [4]byte { - var k [4]byte - _, _ = io.ReadFull(maskRand, k[:]) - return k + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} } func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok { + if e, ok := err.(net.Error); ok && e.Temporary() { err = &netError{msg: e.Error(), timeout: e.Timeout()} } return err @@ -379,9 +372,7 @@ func (c *Conn) read(n int) ([]byte, error) { if err == io.EOF { err = errUnexpectedEOF } - if _, err := c.br.Discard(len(p)); err != nil { - return p, err - } + c.br.Discard(len(p)) return p, err } @@ -396,9 +387,7 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return err } - if err := c.conn.SetWriteDeadline(deadline); err != nil { - return c.writeFatal(err) - } + c.conn.SetWriteDeadline(deadline) if len(buf1) == 0 { _, err = c.conn.Write(buf0) } else { @@ -408,7 +397,7 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return c.writeFatal(err) } if frameType == CloseMessage { - _ = c.writeFatal(ErrCloseSent) + c.writeFatal(ErrCloseSent) } return nil } @@ -449,7 +438,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er d := 1000 * time.Hour if !deadline.IsZero() { - d = time.Until(deadline) + d = deadline.Sub(time.Now()) if d < 0 { return errWriteTimeout } @@ -471,15 +460,13 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er return err } - if err := c.conn.SetWriteDeadline(deadline); err != nil { - return c.writeFatal(err) - } + c.conn.SetWriteDeadline(deadline) _, err = c.conn.Write(buf) if err != nil { return c.writeFatal(err) } if messageType == CloseMessage { - _ = c.writeFatal(ErrCloseSent) + c.writeFatal(ErrCloseSent) } return err } @@ -490,9 +477,7 @@ func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { // probably better to return an error in this situation, but we cannot // change this without breaking existing applications. if c.writer != nil { - if err := c.writer.Close(); err != nil { - log.Printf("websocket: discarding writer close error: %v", err) - } + c.writer.Close() c.writer = nil } @@ -645,7 +630,7 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error { } if final { - _ = w.endMessage(errWriteClosed) + w.endMessage(errWriteClosed) return nil } @@ -810,7 +795,7 @@ func (c *Conn) advanceFrame() (int, error) { // 1. Skip remainder of previous frame. 
if c.readRemaining > 0 { - if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { return noFrame, err } } @@ -832,9 +817,7 @@ func (c *Conn) advanceFrame() (int, error) { rsv2 := p[0]&rsv2Bit != 0 rsv3 := p[0]&rsv3Bit != 0 mask := p[1]&maskBit != 0 - if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { - return noFrame, err - } + c.setReadRemaining(int64(p[1] & 0x7f)) c.readDecompress = false if rsv1 { @@ -939,9 +922,7 @@ func (c *Conn) advanceFrame() (int, error) { } if c.readLimit > 0 && c.readLength > c.readLimit { - if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { - return noFrame, err - } + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) return noFrame, ErrReadLimit } @@ -953,9 +934,7 @@ func (c *Conn) advanceFrame() (int, error) { var payload []byte if c.readRemaining > 0 { payload, err = c.read(int(c.readRemaining)) - if err := c.setReadRemaining(0); err != nil { - return noFrame, err - } + c.setReadRemaining(0) if err != nil { return noFrame, err } @@ -1002,9 +981,7 @@ func (c *Conn) handleProtocolError(message string) error { if len(data) > maxControlFramePayloadSize { data = data[:maxControlFramePayloadSize] } - if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil { - return err - } + c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) return errors.New("websocket: " + message) } @@ -1021,9 +998,7 @@ func (c *Conn) handleProtocolError(message string) error { func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { // Close previous reader, only relevant for decompression. 
if c.reader != nil { - if err := c.reader.Close(); err != nil { - log.Printf("websocket: discarding reader close error: %v", err) - } + c.reader.Close() c.reader = nil } @@ -1079,9 +1054,7 @@ func (r *messageReader) Read(b []byte) (int, error) { } rem := c.readRemaining rem -= int64(n) - if err := c.setReadRemaining(rem); err != nil { - return 0, err - } + c.setReadRemaining(rem) if c.readRemaining > 0 && c.readErr == io.EOF { c.readErr = errUnexpectedEOF } @@ -1121,7 +1094,7 @@ func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { if err != nil { return messageType, nil, err } - p, err = io.ReadAll(r) + p, err = ioutil.ReadAll(r) return messageType, p, err } @@ -1163,9 +1136,7 @@ func (c *Conn) SetCloseHandler(h func(code int, text string) error) { if h == nil { h = func(code int, text string) error { message := FormatCloseMessage(code, "") - if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil { - return err - } + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) return nil } } @@ -1190,7 +1161,7 @@ func (c *Conn) SetPingHandler(h func(appData string) error) { err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) if err == ErrCloseSent { return nil - } else if _, ok := err.(net.Error); ok { + } else if e, ok := err.(net.Error); ok && e.Temporary() { return nil } return err diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go index 67d0968be8..d0742bf2a5 100644 --- a/vendor/github.com/gorilla/websocket/mask.go +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -9,7 +9,6 @@ package websocket import "unsafe" -// #nosec G103 -- (CWE-242) Has been audited const wordSize = int(unsafe.Sizeof(uintptr(0))) func maskBytes(key [4]byte, pos int, b []byte) int { @@ -23,7 +22,6 @@ func maskBytes(key [4]byte, pos int, b []byte) int { } // Mask one byte at a time to word boundary. - //#nosec G103 -- (CWE-242) Has been audited if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { n = wordSize - n for i := range b[:n] { @@ -38,13 +36,11 @@ func maskBytes(key [4]byte, pos int, b []byte) int { for i := range k { k[i] = key[(pos+i)&3] } - //#nosec G103 -- (CWE-242) Has been audited kw := *(*uintptr)(unsafe.Pointer(&k)) // Mask one word at a time. 
n := (len(b) / wordSize) * wordSize for i := 0; i < n; i += wordSize { - //#nosec G103 -- (CWE-242) Has been audited *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go index 80f55d1eac..e0f466b72f 100644 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -8,13 +8,10 @@ import ( "bufio" "encoding/base64" "errors" - "log" "net" "net/http" "net/url" "strings" - - "golang.org/x/net/proxy" ) type netDialerFunc func(network, addr string) (net.Conn, error) @@ -24,7 +21,7 @@ func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { } func init() { - proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil }) } @@ -58,9 +55,7 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) } if err := connectReq.Write(conn); err != nil { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + conn.Close() return nil, err } @@ -69,16 +64,12 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) br := bufio.NewReader(conn) resp, err := http.ReadResponse(br, connectReq) if err != nil { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + conn.Close() return nil, err } if resp.StatusCode != 200 { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + conn.Close() f := strings.SplitN(resp.Status, " ", 2) return nil, errors.New(f[1]) } diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go index 1e720e1da4..bb33597432 100644 --- a/vendor/github.com/gorilla/websocket/server.go +++ b/vendor/github.com/gorilla/websocket/server.go @@ -8,7 +8,6 @@ import ( "bufio" "errors" "io" - "log" "net/http" "net/url" "strings" @@ -184,9 +183,7 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } if brw.Reader.Buffered() > 0 { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + netConn.Close() return nil, errors.New("websocket: client sent data before handshake is complete") } @@ -251,34 +248,17 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade p = append(p, "\r\n"...) // Clear deadlines set by HTTP server. 
- if err := netConn.SetDeadline(time.Time{}); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } + netConn.SetDeadline(time.Time{}) if u.HandshakeTimeout > 0 { - if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) } if _, err = netConn.Write(p); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + netConn.Close() return nil, err } if u.HandshakeTimeout > 0 { - if err := netConn.SetWriteDeadline(time.Time{}); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } + netConn.SetWriteDeadline(time.Time{}) } return c, nil @@ -376,12 +356,8 @@ func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { // bufio.Writer's underlying writer. var wh writeHook bw.Reset(&wh) - if err := bw.WriteByte(0); err != nil { - panic(err) - } - if err := bw.Flush(); err != nil { - log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) - } + bw.WriteByte(0) + bw.Flush() bw.Reset(originalWriter) diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go index 7f38645348..a62b68ccb1 100644 --- a/vendor/github.com/gorilla/websocket/tls_handshake.go +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -1,3 +1,6 @@ +//go:build go1.17 +// +build go1.17 + package websocket import ( diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go new file mode 100644 index 0000000000..e1b2b44f6e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake_116.go @@ -0,0 +1,21 @@ +//go:build !go1.17 +// +build !go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go index 9b1a629bff..31a5dee646 100644 --- a/vendor/github.com/gorilla/websocket/util.go +++ b/vendor/github.com/gorilla/websocket/util.go @@ -6,7 +6,7 @@ package websocket import ( "crypto/rand" - "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + "crypto/sha1" "encoding/base64" "io" "net/http" @@ -17,7 +17,7 @@ import ( var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") func computeAcceptKey(challengeKey string) string { - h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + h := sha1.New() h.Write([]byte(challengeKey)) h.Write(keyGUID) return base64.StdEncoding.EncodeToString(h.Sum(nil)) diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 0000000000..2e668f6b88 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. 
+//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. 
+func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). 
+// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. +func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. 
+func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) 
+ } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index 78d7c9f5c8..a65d88eb86 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -73,7 +73,7 @@ go_test( "@org_golang_google_genproto_googleapis_api//httpbody", "@org_golang_google_genproto_googleapis_rpc//errdetails", "@org_golang_google_genproto_googleapis_rpc//status", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//health/grpc_health_v1", "@org_golang_google_grpc//metadata", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 31553e7848..2f2b342431 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -49,6 +49,7 @@ var malformedHTTPHeaders = map[string]struct{}{ type ( rpcMethodKey struct{} httpPathPatternKey struct{} + httpPatternKey struct{} AnnotateContextOption func(ctx context.Context) context.Context ) @@ -148,6 +149,12 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM var pairs []string for key, vals := range req.Header { key = textproto.CanonicalMIMEHeaderKey(key) + switch key { + case xForwardedFor, xForwardedHost: + // Handled separately below + continue + } + for _, val := range vals { // For backwards-compatibility, pass through 'authorization' header with no prefix. 
if key == "Authorization" { @@ -181,18 +188,17 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) } + xff := req.Header.Values(xForwardedFor) if addr := req.RemoteAddr; addr != "" { if remoteIP, _, err := net.SplitHostPort(addr); err == nil { - if fwd := req.Header.Get(xForwardedFor); fwd == "" { - pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) - } else { - pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) - } + xff = append(xff, remoteIP) } } + if len(xff) > 0 { + pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", ")) + } if timeout != 0 { - //nolint:govet // The context outlives this function ctx, _ = context.WithTimeout(ctx, timeout) } if len(pairs) == 0 { @@ -399,3 +405,13 @@ func HTTPPathPattern(ctx context.Context) (string, bool) { func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) } + +// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists. +func HTTPPattern(ctx context.Context) (Pattern, bool) { + v, ok := ctx.Value(httpPatternKey{}).(Pattern) + return v, ok +} + +func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context { + return context.WithValue(ctx, httpPatternKey{}, httpPattern) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 230cac7b86..01f5734191 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -71,7 +71,7 @@ func HTTPStatusFromCode(code codes.Code) int { case codes.DataLoss: return http.StatusInternalServerError default: - grpclog.Infof("Unknown gRPC error code: %v", code) + grpclog.Warningf("Unknown gRPC error code: %v", code) return http.StatusInternalServerError } } @@ -93,6 +93,7 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { // return Internal when Marshal failed const fallback = `{"code": 13, "message": "failed to marshal error message"}` + const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}` var customStatus *HTTPStatusError if errors.As(err, &customStatus) { @@ -100,31 +101,40 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh } s := status.Convert(err) - pb := s.Proto() w.Header().Del("Trailer") w.Header().Del("Transfer-Encoding") - contentType := marshaler.ContentType(pb) + respRw, err := mux.forwardResponseRewriter(ctx, s.Proto()) + if err != nil { + grpclog.Errorf("Failed to rewrite error message %q: %v", s, err) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallbackRewriter); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + contentType := marshaler.ContentType(respRw) w.Header().Set("Content-Type", contentType) if s.Code() == codes.Unauthenticated { w.Header().Set("WWW-Authenticate", s.Message()) } - buf, merr := marshaler.Marshal(pb) + buf, merr := marshaler.Marshal(respRw) if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", s, merr) + grpclog.Errorf("Failed to marshal 
error message %q: %v", s, merr) w.WriteHeader(http.StatusInternalServerError) if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to write response: %v", err) } return } md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") } handleForwardResponseServerMetadata(w, mux, md) @@ -148,7 +158,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh w.WriteHeader(st) if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to write response: %v", err) } if doForwardTrailers { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 19d9d37fff..9005d6a0bf 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -41,7 +41,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field m, ok := item.node.(map[string]interface{}) switch { - case ok: + case ok && len(m) > 0: // if the item is an object, then enqueue all of its children for k, v := range m { if item.msg == nil { @@ -96,6 +96,8 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field queue = append(queue, child) } } + case ok && len(m) == 0: + fallthrough case len(item.path) > 0: // otherwise, it's a leaf node so print its path fm.Paths = append(fm.Paths, item.path) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 5e14cf8b0e..9f50a569e9 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -3,9 +3,11 @@ package runtime import ( "context" "errors" + "fmt" "io" "net/http" "net/textproto" + "strconv" "strings" "google.golang.org/genproto/googleapis/api/httpbody" @@ -17,16 +19,10 @@ import ( // ForwardResponseStream forwards the stream from gRPC server to REST client. 
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - f, ok := w.(http.Flusher) - if !ok { - grpclog.Infof("Flush not supported in %T", w) - http.Error(w, "unexpected type of web server", http.StatusInternalServerError) - return - } - + rc := http.NewResponseController(w) md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") http.Error(w, "unexpected error", http.StatusInternalServerError) return } @@ -60,20 +56,27 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(resp)) + w.Header().Set("Content-Type", marshaler.ContentType(respRw)) } var buf []byte - httpBody, isHTTPBody := resp.(*httpbody.HttpBody) + httpBody, isHTTPBody := respRw.(*httpbody.HttpBody) switch { - case resp == nil: + case respRw == nil: buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) case isHTTPBody: buf = httpBody.GetData() default: - result := map[string]interface{}{"result": resp} - if rb, ok := resp.(responseBody); ok { + result := map[string]interface{}{"result": respRw} + if rb, ok := respRw.(responseBody); ok { result["result"] = rb.XXX_ResponseBody() } @@ -81,20 +84,29 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal } if err != nil { - grpclog.Infof("Failed to marshal response chunk: %v", err) + grpclog.Errorf("Failed to marshal response chunk: %v", err) handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) return } if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to send response chunk: %v", err) + grpclog.Errorf("Failed to send response chunk: %v", err) return } wroteHeader = true if _, err := w.Write(delimiter); err != nil { - grpclog.Infof("Failed to send delimiter chunk: %v", err) + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } + err = rc.Flush() + if err != nil { + if errors.Is(err, http.ErrNotSupported) { + grpclog.Errorf("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + grpclog.Errorf("Failed to flush response to client: %v", err) return } - f.Flush() } } @@ -136,7 +148,7 @@ type responseBody interface { func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") } handleForwardResponseServerMetadata(w, mux, md) @@ -160,21 +172,30 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha HTTPError(ctx, mux, marshaler, w, req, err) return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + HTTPError(ctx, mux, 
marshaler, w, req, err) + return + } var buf []byte - var err error - if rb, ok := resp.(responseBody); ok { + if rb, ok := respRw.(responseBody); ok { buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) } else { - buf, err = marshaler.Marshal(resp) + buf, err = marshaler.Marshal(respRw) } if err != nil { - grpclog.Infof("Marshal error: %v", err) + grpclog.Errorf("Marshal error: %v", err) HTTPError(ctx, mux, marshaler, w, req, err) return } + if !doForwardTrailers { + w.Header().Set("Content-Length", strconv.Itoa(len(buf))) + } + if _, err = w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to write response: %v", err) } if doForwardTrailers { @@ -193,8 +214,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } for _, opt := range opts { if err := opt(ctx, w, resp); err != nil { - grpclog.Infof("Error handling ForwardResponseOptions: %v", err) - return err + return fmt.Errorf("error handling ForwardResponseOptions: %w", err) } } return nil @@ -209,15 +229,15 @@ func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, mar } buf, err := marshaler.Marshal(msg) if err != nil { - grpclog.Infof("Failed to marshal an error: %v", err) + grpclog.Errorf("Failed to marshal an error: %v", err) return } if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to notify error to client: %v", err) + grpclog.Errorf("Failed to notify error to client: %v", err) return } if _, err := w.Write(delimiter); err != nil { - grpclog.Infof("Failed to send delimiter chunk: %v", err) + grpclog.Errorf("Failed to send delimiter chunk: %v", err) return } } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go index d6aa825783..fe52081ab9 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go @@ -24,6 +24,11 @@ func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) } +// MarshalIndent is like Marshal but applies Indent to format the output +func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} + // Unmarshal unmarshals JSON data into "v". func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go index 51b8247da2..8376d1e0ef 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -30,10 +30,6 @@ func (*JSONPb) ContentType(_ interface{}) string { // Marshal marshals "v" into JSON. 
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { - if _, ok := v.(proto.Message); !ok { - return j.marshalNonProtoField(v) - } - var buf bytes.Buffer if err := j.marshalTo(&buf, v); err != nil { return nil, err @@ -48,9 +44,17 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { if err != nil { return err } + if j.Indent != "" { + b := &bytes.Buffer{} + if err := json.Indent(b, buf, "", j.Indent); err != nil { + return err + } + buf = b.Bytes() + } _, err = w.Write(buf) return err } + b, err := j.MarshalOptions.Marshal(p) if err != nil { return err @@ -150,9 +154,6 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { } m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) } - if j.Indent != "" { - return json.MarshalIndent(m, "", j.Indent) - } return json.Marshal(m) } if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index a714de0240..0b051e6e89 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -46,7 +46,7 @@ func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, out for _, contentTypeVal := range r.Header[contentTypeHeader] { contentType, _, err := mime.ParseMediaType(contentTypeVal) if err != nil { - grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err) + grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err) continue } if m, ok := mux.marshalers.mimeMap[contentType]; ok { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index 628e1fde1c..60c2065ddc 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -48,12 +48,19 @@ var encodedPathSplitter = regexp.MustCompile("(/|%2F)") // A HandlerFunc handles a specific pair of path pattern and HTTP method. type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) +// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing of the request. This is used as an alternative to gRPC interceptors when using the direct-to-implementation +// registration methods. It is generally recommended to use gRPC client or server interceptors instead +// where possible. +type Middleware func(HandlerFunc) HandlerFunc + // ServeMux is a request multiplexer for grpc-gateway. // It matches http requests to patterns and invokes the corresponding handler. type ServeMux struct { // handlers maps HTTP method to a list of handlers. handlers map[string][]handler + middlewares []Middleware forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + forwardResponseRewriter ForwardResponseRewriter marshalers marshalerRegistry incomingHeaderMatcher HeaderMatcherFunc outgoingHeaderMatcher HeaderMatcherFunc @@ -69,6 +76,24 @@ type ServeMux struct { // ServeMuxOption is an option that can be given to a ServeMux on construction. type ServeMuxOption func(*ServeMux) +// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages +// before they are forwarded in a unary, stream, or error response. 
+type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error) + +// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic +// that can rewrite the final response before it is forwarded. +// +// The response rewriter function is called during unary message forwarding, stream message +// forwarding and when errors are being forwarded. +// +// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect. +// Since this option involves making runtime changes to the response shape or type. +func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption { + return func(sm *ServeMux) { + sm.forwardResponseRewriter = fwdResponseRewriter + } +} + // WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. // // forwardResponseOption is an option that will be called on the relevant context.Context, @@ -89,6 +114,15 @@ func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { } } +// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC +// interceptors when using the direct-to-implementation registration methods and cannot rely +// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible. +func WithMiddlewares(middlewares ...Middleware) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.middlewares = append(serveMux.middlewares, middlewares...) + } +} + // SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. // Configuring this will mean the generated OpenAPI output is no longer correct, and it should be // done with careful consideration. @@ -277,13 +311,14 @@ func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMux // NewServeMux returns a new ServeMux whose internal mapping is empty. func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - errorHandler: DefaultHTTPErrorHandler, - streamErrorHandler: DefaultStreamErrorHandler, - routingErrorHandler: DefaultRoutingErrorHandler, - unescapingMode: UnescapingModeDefault, + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil }, + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, } for _, opt := range opts { @@ -305,6 +340,9 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { // Handle associates "h" to the pair of HTTP method and path pattern. func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if len(s.middlewares) > 0 { + h = chainMiddlewares(s.middlewares)(h) + } s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) 
} @@ -341,13 +379,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { } if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { - r.Method = strings.ToUpper(override) if err := r.ParseForm(); err != nil { _, outboundMarshaler := MarshalerForRequest(s, r) sterr := status.Error(codes.InvalidArgument, err.Error()) s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) return } + r.Method = strings.ToUpper(override) } var pathComponents []string @@ -405,7 +443,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { } continue } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } @@ -458,7 +496,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) return } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } _, outboundMarshaler := MarshalerForRequest(s, r) @@ -484,3 +522,16 @@ type handler struct { pat Pattern h HandlerFunc } + +func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) { + h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams) +} + +func chainMiddlewares(mws []Middleware) Middleware { + return func(next HandlerFunc) HandlerFunc { + for i := len(mws); i > 0; i-- { + next = mws[i-1](next) + } + return next + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go index 8f90d15a56..e54507145b 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go @@ -52,13 +52,13 @@ type Pattern struct { // It returns an error if the given definition is invalid. 
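The mux.go hunks above introduce two new `ServeMux` extension points: `WithMiddlewares`, which chains `Middleware` wrappers around every registered `HandlerFunc`, and `WithForwardResponseRewriter`, which lets a caller replace the response message before it is marshaled. As a rough usage sketch only (not part of this diff; the logging middleware and the `{"data": ...}` envelope are illustrative assumptions), a gateway built against the grpc-gateway version pulled in here could wire them up like this:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/proto"
)

func main() {
	mux := runtime.NewServeMux(
		// Middleware wraps every HandlerFunc registered on the mux
		// (an alternative to gRPC interceptors for direct registration).
		runtime.WithMiddlewares(func(next runtime.HandlerFunc) runtime.HandlerFunc {
			return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
				log.Printf("gateway: %s %s", r.Method, r.URL.Path)
				next(w, r, pathParams)
			}
		}),
		// ForwardResponseRewriter may replace the message before it is
		// forwarded; here it wraps each response in an illustrative envelope.
		runtime.WithForwardResponseRewriter(func(ctx context.Context, resp proto.Message) (any, error) {
			return map[string]any{"data": resp}, nil
		}),
	)

	// Handlers generated by protoc-gen-grpc-gateway would be registered here.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```

Because a rewriter like this changes the wire shape of every response, the NOTE in the diff about `protoc-gen-openapiv2` output becoming inaccurate applies to exactly this kind of envelope.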
func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { if version != 1 { - grpclog.Infof("unsupported version: %d", version) + grpclog.Errorf("unsupported version: %d", version) return Pattern{}, ErrInvalidPattern } l := len(ops) if l%2 != 0 { - grpclog.Infof("odd number of ops codes: %d", l) + grpclog.Errorf("odd number of ops codes: %d", l) return Pattern{}, ErrInvalidPattern } @@ -81,14 +81,14 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er stack++ case utilities.OpPushM: if pushMSeen { - grpclog.Infof("pushM appears twice") + grpclog.Error("pushM appears twice") return Pattern{}, ErrInvalidPattern } pushMSeen = true stack++ case utilities.OpLitPush: if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("negative literal index: %d", op.operand) + grpclog.Errorf("negative literal index: %d", op.operand) return Pattern{}, ErrInvalidPattern } if pushMSeen { @@ -97,18 +97,18 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er stack++ case utilities.OpConcatN: if op.operand <= 0 { - grpclog.Infof("negative concat size: %d", op.operand) + grpclog.Errorf("negative concat size: %d", op.operand) return Pattern{}, ErrInvalidPattern } stack -= op.operand if stack < 0 { - grpclog.Info("stack underflow") + grpclog.Error("stack underflow") return Pattern{}, ErrInvalidPattern } stack++ case utilities.OpCapture: if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("variable name index out of bound: %d", op.operand) + grpclog.Errorf("variable name index out of bound: %d", op.operand) return Pattern{}, ErrInvalidPattern } v := pool[op.operand] @@ -116,11 +116,11 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er vars = append(vars, v) stack-- if stack < 0 { - grpclog.Infof("stack underflow") + grpclog.Error("stack underflow") return Pattern{}, ErrInvalidPattern } default: - grpclog.Infof("invalid opcode: %d", op.code) + grpclog.Errorf("invalid opcode: %d", op.code) return Pattern{}, ErrInvalidPattern } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index d01933c4fd..fe634174b8 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -51,11 +51,13 @@ func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *u key = match[1] values = append([]string{match[2]}, values...) 
} - fieldPath := strings.Split(key, ".") + + msgValue := msg.ProtoReflect() + fieldPath := normalizeFieldPath(msgValue, strings.Split(key, ".")) if filter.HasCommonPrefix(fieldPath) { continue } - if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil { + if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil { return err } } @@ -68,6 +70,38 @@ func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value stri return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value}) } +func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string { + newFieldPath := make([]string, 0, len(fieldPath)) + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + fieldDesc := fields.ByTextName(fieldName) + if fieldDesc == nil { + fieldDesc = fields.ByJSONName(fieldName) + } + if fieldDesc == nil { + // return initial field path values if no matching message field was found + return fieldPath + } + + newFieldPath = append(newFieldPath, string(fieldDesc.Name())) + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated { + return fieldPath + } + + // Get the nested message + msgValue = msgValue.Get(fieldDesc).Message() + } + + return newFieldPath +} + func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error { if len(fieldPath) < 1 { return errors.New("no field path") diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 0000000000..402433593c --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/fvbommel/sortorder/.gitignore b/vendor/github.com/klauspost/compress/.gitignore similarity index 72% rename from vendor/github.com/fvbommel/sortorder/.gitignore rename to vendor/github.com/klauspost/compress/.gitignore index c021733e25..d31b378152 100644 --- a/vendor/github.com/fvbommel/sortorder/.gitignore +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -2,18 +2,31 @@ *.o *.a *.so + # Folders _obj _test + # Architecture specific extensions/prefixes *.[568vq] [568vq].out + *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* + _testmain.go + *.exe *.test *.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 0000000000..a22953805c --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,123 @@ +# This is an example goreleaser.yaml file with some sane defaults. 
+# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 0000000000..87d5574777 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 0000000000..684a30853a --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,714 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. 
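As a purely illustrative aside (not part of the vendored README): the bullet list above advertises the deflate/gzip/zlib packages as drop-in replacements for the standard library, so swapping the import path is typically the only change needed. A minimal sketch, assuming the gzip-compatible API:

```go
package main

import (
	"bytes"
	"fmt"

	gzip "github.com/klauspost/compress/gzip" // drop-in for compress/gzip
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf) // same API surface as the standard library writer
	if _, err := zw.Write([]byte("hello, hello, hello, world")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	fmt.Println("compressed bytes:", buf.Len())
}
```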
+ +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close 
https://github.com/klauspost/compress/pull/883 + +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ See changes to v1.16.x + + +* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) + * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 + * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 + +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 +
+ +
+ See changes to v1.15.x + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. + +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to testing when upgrading. + +
+ +
+ See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
+ +
+ See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0 + +* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. +* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+ +# deflate usage + +The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: + +| old import | new import | Documentation +|--------------------|-----------------------------------------|--------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) + +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). + +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. + +The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). + +Currently there is only minor speedup on decompression (mostly CRC32 calculation). + +Memory usage is typically 1MB for a Writer. stdlib is in the same range. +If you expect to have a lot of concurrently allocated Writers consider using +the stateless compress described below. + +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + +To disable all assembly add `-tags=noasm`. This works across all packages. + +# Stateless compression + +This package offers stateless compression as a special option for gzip/deflate. +It will do compression but without maintaining any state between Write calls. + +This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. + +This is only relevant in cases where you expect to run many thousands of compressors concurrently, +but with very little activity. This is *not* intended for regular web servers serving individual requests. + +Because of this, the size of actual Write calls will affect output size. + +In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. + +For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) + +A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: + +```go + // replace 'ioutil.Discard' with your output. + gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) + if err != nil { + return err + } + defer gzw.Close() + + w := bufio.NewWriterSize(gzw, 4096) + defer w.Flush() + + // Write to 'w' +``` + +This will only use up to 4KB in memory when the writer is idle. 
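For completeness, a minimal sketch of the direct deflate route mentioned above (`NewStatelessWriter`). Only the function name comes from the text above; the buffer size and the wiring around it are illustrative assumptions.

```go
package main

import (
	"bufio"
	"io"
	"os"

	"github.com/klauspost/compress/flate"
)

func main() {
	// The stateless writer keeps no state between Write calls, so buffer
	// the writes to give each stateless block a reasonable amount of data.
	fw := flate.NewStatelessWriter(os.Stdout)
	defer fw.Close()

	bw := bufio.NewWriterSize(fw, 32<<10) // 32KB buffer (arbitrary choice)
	if _, err := io.Copy(bw, os.Stdin); err != nil {
		panic(err)
	}
	if err := bw.Flush(); err != nil {
		panic(err)
	}
}
```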
+ +Compression is almost always worse than the fastest compression level +and each write will allocate (a little) memory. + +# Performance Update 2018 + +It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. + +The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. + +The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. + +The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). + + +## Overall differences. + +There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. + +The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library. + +This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. + +There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. + +## Web Content + +This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. + +Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. + +Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. + +## Object files + +This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. 
+ +The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. + +The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. + +## Highly Compressible File + +This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. + +It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. + +So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". + +## Medium-High Compressible + +This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. + +We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. + +## Medium Compressible + +I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. + +The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. + + +## Un-compressible Content + +This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. + + +## Huffman only compression + +This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. + +This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). + +Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. + +The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). + +The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. 
For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup. + +For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). + +This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. + +# Other packages + +Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): + +* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. +* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. +* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. +* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. +* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. +* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. +* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. + +# license + +This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md new file mode 100644 index 0000000000..ca6685e2b7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Vulnerability Definition + +A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. + +Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. + +Out-of-memory crashes only applies if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. + +It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. + +Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed in a best effort base. 
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go new file mode 100644 index 0000000000..ea5a692d51 --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible.go @@ -0,0 +1,85 @@ +package compress + +import "math" + +// Estimate returns a normalized compressibility estimate of block b. +// Values close to zero are likely uncompressible. +// Values above 0.1 are likely to be compressible. +// Values above 0.5 are very compressible. +// Very small lengths will return 0. +func Estimate(b []byte) float64 { + if len(b) < 16 { + return 0 + } + + // Correctly predicted order 1 + hits := 0 + lastMatch := false + var o1 [256]byte + var hist [256]int + c1 := byte(0) + for _, c := range b { + if c == o1[c1] { + // We only count a hit if there was two correct predictions in a row. + if lastMatch { + hits++ + } + lastMatch = true + } else { + lastMatch = false + } + o1[c1] = c + c1 = c + hist[c]++ + } + + // Use x^0.6 to give better spread + prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) + + // Calculate histogram distribution + variance := float64(0) + avg := float64(len(b)) / 256 + + for _, v := range hist { + Δ := float64(v) - avg + variance += Δ * Δ + } + + stddev := math.Sqrt(float64(variance)) / float64(len(b)) + exp := math.Sqrt(1 / float64(len(b))) + + // Subtract expected stddev + stddev -= exp + if stddev < 0 { + stddev = 0 + } + stddev *= 1 + exp + + // Use x^0.4 to give better spread + entropy := math.Pow(stddev, 0.4) + + // 50/50 weight between prediction and histogram distribution + return math.Pow((prediction+entropy)/2, 0.9) +} + +// ShannonEntropyBits returns the number of bits minimum required to represent +// an entropy encoding of the input bytes. +// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 0000000000..ea7324da67 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. 
+This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `(error)` | An internal error occurred. | + +As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). + +# Performance + +A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. +All compression functions are currently only running on the calling goroutine so only one core will be used per block. + +The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input +is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be +beneficial to transpose all your input values down by 64. + +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. 
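To tie the usage notes above together, a hedged round-trip sketch using `Compress`, `Decompress` and a shared `Scratch`. The input data and error handling are illustrative; clearing `Out` follows the caveat above about the shared output buffer, and the `Decompress` signature is assumed to mirror `Compress`.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	// Skewed single-byte distribution, so entropy coding should pay off.
	in := bytes.Repeat([]byte("abbcccddddeeeee"), 200)

	var s fse.Scratch
	comp, err := fse.Compress(in, &s)
	switch err {
	case fse.ErrIncompressible:
		fmt.Println("store the block uncompressed")
		return
	case fse.ErrUseRLE:
		fmt.Println("store the block as RLE")
		return
	case nil:
	default:
		panic(err)
	}

	// comp aliases s.Out; detach it before reusing the scratch, as noted above.
	s.Out = nil

	// There are no integrity checks, so the caller tracks sizes itself.
	out, err := fse.Decompress(comp, &s)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round trip ok: %v\n", len(in), len(comp), bytes.Equal(in, out))
}
```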
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 0000000000..f65eb3909c --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. 
+ b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 0000000000..e82fa3bb7b --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,167 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 0000000000..abade2d605 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 0000000000..074018d8f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + src = src[:ip] + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + default: + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + s.bw.close() + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m, symlen := uint32(0), s.symbolLen + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + symlen = uint16(i) + 1 + } + s.symbolLen = symlen + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
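The symbol-spreading loop in `buildCTable` above steps through the table with `tableStep(tableSize)`, which is odd for the table sizes used here and therefore co-prime with the power-of-two table size, so `position` visits every slot exactly once and wraps back to zero; that is what the `position != 0` check verifies. A small standalone sketch of this property (the table size of 64 is only an example):

```go
package main

import "fmt"

func main() {
	const tableSize = 64                            // power of two, as in the real tables
	step := (tableSize >> 1) + (tableSize >> 3) + 3 // same formula as tableStep: 43 here, odd
	seen := make(map[int]bool, tableSize)

	pos := 0
	for i := 0; i < tableSize; i++ {
		seen[pos] = true
		pos = (pos + step) & (tableSize - 1)
	}
	// Every slot is visited exactly once and the walk ends back at zero.
	fmt.Println(len(seen) == tableSize, pos == 0) // true true
}
```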
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
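`normalizeCount` above scales each symbol count into a share of the 2^tableLog slots using 62-bit fixed point (`step = 2^62 / total`, then a shift by `62 - tableLog`). A tiny sketch of that arithmetic with made-up numbers, leaving out the `rtbTable` rounding correction:

```go
package main

import "fmt"

func main() {
	const tableLog = 11
	total := uint64(100_000) // bytes in the block
	count := uint64(1_234)   // occurrences of one symbol

	scale := 62 - uint64(tableLog)
	step := (uint64(1) << 62) / total
	proba := (count * step) >> scale

	// Both expressions compute roughly count * 2^tableLog / total.
	fmt.Println(proba, count*(1<<tableLog)/total) // 25 25
}
```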
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. 
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + if err := br.init(s.br.unread()); err != nil { + return err + } + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 0000000000..535cbadfde --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
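`Histogram` and `HistogramFinished` above let a caller that has already counted symbols skip the counting pass inside `Compress`. A hedged sketch of that flow (the input is an illustrative assumption):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	in := bytes.Repeat([]byte("aaaaaabbbccd"), 50) // illustrative input
	var s fse.Scratch

	// Fill the scratch histogram directly, then report the highest symbol
	// and the count of the most frequent one, as the doc comments require.
	hist := s.Histogram()
	for _, b := range in {
		hist[b]++
	}
	var maxSym uint8
	maxCount := 0
	for i, c := range hist {
		if c == 0 {
			continue
		}
		maxSym = uint8(i)
		if int(c) > maxCount {
			maxCount = int(c)
		}
	}
	s.HistogramFinished(maxSym, maxCount)

	out, err := fse.Compress(in, &s)
	fmt.Println(len(out), err)
}
```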
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 0000000000..aff942205f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 0000000000..b3d262958f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 0000000000..8b6e5c6638
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders),
+a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU
+(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders,
+but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low level interface that allows to compress single independent blocks.
+
+Each block is separate, and there is no built in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error | Description |
+|---------------------|-----------------------------------------------------------------------------|
+| `<nil>` | Everything ok, output is returned |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress |
+| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated |
+| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) |
+| `(error)` | An internal error occurred. |
+
+
+As can be seen above some of these are errors that will be returned even under normal operation so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same
+object can be used for both.
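To make the compression side of the API described above concrete, here is a hedged sketch of calling `Compress1X` with a re-usable `Scratch` and branching on the documented error values; the input and the fallback handling are illustrative assumptions, not part of the vendored package:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := bytes.Repeat([]byte("huff0 entropy coding example "), 64) // illustrative input
	var s huff0.Scratch

	out, reused, err := huff0.Compress1X(in, &s)
	switch err {
	case huff0.ErrIncompressible:
		fmt.Println("store the block uncompressed")
	case huff0.ErrUseRLE:
		fmt.Println("store the block as RLE")
	case nil:
		// reused reports whether the previous table was used again; callers must
		// record this themselves, since it is not stored in the output block.
		fmt.Println("compressed", len(in), "->", len(out), "bytes, table re-used:", reused)
	default:
		panic(err) // e.g. ErrTooBig for blocks over the maximum size
	}
}
```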
+ +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. + +## Tables and re-use + +Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. + +The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) +that controls this behaviour. See the documentation for details. This can be altered between each block. + +Do however note that this information is *not* stored in the output block and it is up to the users of the package to +record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, +based on the boolean reported back from the CompressXX call. + +If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the +[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. + +## Decompressing + +The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). +This will initialize the decoding tables. +You can supply the complete block to `ReadTable` and it will return the data part of the block +which can be given to the decompressor. + +Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) +or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. + +For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. + +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go new file mode 100644 index 0000000000..e36d9742f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -0,0 +1,229 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. 
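A sketch of the decompression flow described in the README above: read the table with `ReadTable`, then decode the remaining data. The `Decompress1X` method signature is an assumption based on the README links, and `origSize` stands in for a block size the caller recorded elsewhere, since huff0 stores no sizes or checksums:

```go
package example

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

// decode rebuilds one block that was produced by Compress1X together with a
// freshly written table. A nil Scratch makes ReadTable allocate one.
func decode(compressed []byte, origSize int) ([]byte, error) {
	// ReadTable consumes the serialized table and returns the data part.
	s, data, err := huff0.ReadTable(compressed, nil)
	if err != nil {
		return nil, err
	}
	out, err := s.Decompress1X(data)
	if err != nil {
		return nil, err
	}
	if len(out) != origSize {
		return nil, fmt.Errorf("decoded %d bytes, expected %d", len(out), origSize)
	}
	return out, nil
}
```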
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 0000000000..0ebc9aaac7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,102 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. 
value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 0000000000..84aa3d12f0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,742 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. 
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src), nil +} + +func (s *Scratch) compress1xDo(dst, src []byte) []byte { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + bw.close() + return bw.out +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + idx := len(s.Out) + s.Out = s.compress1xDo(s.Out, toDo) + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. 
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. + huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count() == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].setCount(1 << 30) + } + // fake entry, strong barrier + huffNode0[0].setCount(1 << 31) + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].setNbBits(0) + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits()]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol()].nBits = v.nbBits() + } + + // 
assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. + var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count() { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits() + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits() == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits() >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits() // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits() == maxNbBits { + n-- + } + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 +} + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 0000000000..0f56b02d74 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
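+//
+// A minimal usage sketch (error handling elided; "compressed" is a
+// hypothetical buffer holding a serialized table followed by a 1X-encoded
+// payload):
+//
+//	s, remain, _ := ReadTable(compressed, nil)
+//	out, _ := s.Decompress1X(remain)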
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, fmt.Errorf("fse decompress returned: %w", err) + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + 
single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 0 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + // Decode remaining. 
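+ // The output is laid out as four nearly equal quarters of dstEvery bytes;
+ // the unrolled loop above wrote decoded/4 bytes of each quarter, so every
+ // stream still has remainBytes left, decoded one symbol at a time below.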
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[br.peekByteFast()].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// matches will compare a decoding table to a coding table. +// Errors are written to the writer. +// Nothing will be written if table is ok. +func (s *Scratch) matches(ct cTable, w io.Writer) { + if s == nil || len(s.dt.single) == 0 { + return + } + dt := s.dt.single[:1<<s.actualTableLog] + tablelog := s.actualTableLog + ok := 0 + broken := 0 + for sym, enc := range ct { + errs := 0 + broken++ + if enc.nBits == 0 { + for _, dec := range dt { + if uint8(dec.entry>>8) == byte(sym) { + fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) + errs++ + break + } + } + if errs == 0 { + broken-- + } + continue + } + // Unused bits in input + ub := tablelog - enc.nBits + top := enc.val << ub + // decoder looks at top bits. + dec := dt[top] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 0 { + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) + continue + } + // Ensure that all combinations are covered. + for i := uint16(0); i < (1 << ub); i++ { + vval := top | i + dec := dt[vval] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 20 { + fmt.Fprintf(w, "%d errors, stopping\n", errs) + break + } + } + if errs == 0 { + ok++ + broken-- + } + } + if broken > 0 { + fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 0000000000..ba7e8e6b02 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,226 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// and Decoder.Decompress1X that use an asm implementation of their main loops.
+package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
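+			// Each dEntrySingle packs the code length in its low 8 bits and the
+			// decoded symbol in its high 8 bits: uint8(v) is the number of bits
+			// to advance and v>>8 is the output byte.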
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 0000000000..c4c7ab2d1f --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,830 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + 
MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + 
+ // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + 
SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), 
CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 0000000000..908c17de63 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
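+	// Each stream accumulates decoded bytes in its own 256-byte scratch buffer;
+	// when off wraps back to zero the four buffers are flushed to their
+	// quarters of the output slice.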
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 0000000000..77ecd68e0a --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. 
+package huff0 + +import ( + "errors" + "fmt" + "math" + "math/bits" + "sync" + + "github.com/klauspost/compress/fse" +) + +const ( + maxSymbolValue = 255 + + // zstandard limits tablelog to 11, see: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description + tableLogMax = 11 + tableLogDefault = 11 + minTablelog = 5 + huffNodesLen = 512 + + // BlockSizeMax is maximum input size for a single block uncompressed. + BlockSizeMax = 1<<18 - 1 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") + + // ErrTooBig is return if input is too large for a single block. + ErrTooBig = errors.New("input too big") + + // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. + ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") +) + +type ReusePolicy uint8 + +const ( + // ReusePolicyAllow will allow reuse if it produces smaller output. + ReusePolicyAllow ReusePolicy = iota + + // ReusePolicyPrefer will re-use aggressively if possible. + // This will not check if a new table will produce smaller output, + // except if the current table is impossible to use or + // compressed output is bigger than input. + ReusePolicyPrefer + + // ReusePolicyNone will disable re-use of tables. + // This is slightly faster than ReusePolicyAllow but may produce larger output. + ReusePolicyNone + + // ReusePolicyMust must allow reuse and produce smaller output. + ReusePolicyMust +) + +type Scratch struct { + count [maxSymbolValue + 1]uint32 + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // OutTable will contain the table data only, if a new table has been generated. + // Slice of the returned data. + OutTable []byte + + // OutData will contain the compressed data. + // Slice of the returned data. + OutData []byte + + // MaxDecodedSize will set the maximum allowed output size. + // This value will automatically be set to BlockSizeMax if not set. + // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. + MaxDecodedSize int + + srcLen int + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + // Must be <= 11 and >= 5. + TableLog uint8 + + // Reuse will specify the reuse policy + Reuse ReusePolicy + + // WantLogLess allows to specify a log 2 reduction that should at least be achieved, + // otherwise the block will be returned as incompressible. + // The reduction should then at least be (input size >> WantLogLess) + // If WantLogLess == 0 any improvement will do. + WantLogLess uint8 + + symbolLen uint16 // Length of active part of the symbol table. + maxCount int // count of the most probable symbol + clearCount bool // clear count + actualTableLog uint8 // Selected tablelog. 
+ prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.srcLen = len(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
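+	// Direct representation: the header byte has its high bit set and
+	// encodes the number of 4-bit weights that follow; the final symbol's
+	// weight is omitted and reconstructed by the decoder.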
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 0000000000..3954c51219 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. 
+func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 0000000000..e802579c4f --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 0000000000..4465fbe9e9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 0000000000..6050c10f4c --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
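The cpuinfo helpers above are what the BMI2 assembly fast paths consult when choosing a kernel. Because the package sits under `internal/`, it can only be imported from inside the klauspost/compress module itself (for example from that module's tests); the sketch below is only an illustration of the call pattern, including the `DisableBMI2` hook for forcing the generic code paths.

```Go
package cpuinfo_test

import (
	"fmt"

	"github.com/klauspost/compress/internal/cpuinfo"
)

func Example() {
	// Report which BMI feature bits the running x86 CPU advertises.
	fmt.Println("BMI1:", cpuinfo.HasBMI1())
	fmt.Println("BMI2:", cpuinfo.HasBMI2())
	fmt.Println("BMI1+BMI2:", cpuinfo.HasBMI())

	// Temporarily force the generic (non-BMI2) code paths, e.g. when
	// exercising both implementations; the returned func restores the state.
	restore := cpuinfo.DisableBMI2()
	defer restore()
	fmt.Println("BMI2 with fast paths disabled:", cpuinfo.HasBMI2())
}
```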
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 0000000000..40796a49d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 0000000000..77395a6b8b --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. 
Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 0000000000..13c6040a5d --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. 
This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. 
+func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. 
+func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 0000000000..2754bac6f1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// EncodeBlockInto exposes encodeBlock but checks dst size. 
+func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. 
+ d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 0000000000..34d01f4aa6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. 
+ +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 0000000000..5a4412f907 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.19 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 0000000000..92e2347bbc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. 
+ +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. + + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +Smaller encodes are encouraged to use the EncodeAll function. +Use `NewWriter` to create a new instance that can be used for both. + +To create a writer with default options, do like this: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + enc, err := zstd.NewWriter(out) + if err != nil { + return err + } + _, err = io.Copy(enc, in) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. +Even if your encode fails, you should still call `Close()` to release any resources that may be held up. + +The above is fine for big encodes. However, whenever possible try to *reuse* the writer. + +To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. +This will allow the encoder to reuse all resources and avoid wasteful allocations. + +Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part +of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change +in the future. So if you want to limit concurrency for future updates, specify the concurrency +you would like. + +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + +You can specify your desired compression level using `WithEncoderLevel()` option. 
Currently only pre-defined +compression settings can be specified. + +#### Future Compatibility Guarantees + +This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change. + +The goal will be to keep the default efficiency at the default zstd (level 3). +However the encoding should never be assumed to remain the same, +and you should not use hashes of compressed output for similarity checks. + +The Encoder can be assumed to produce the same output from the exact same code version. +However, the may be modes in the future that break this, +although they will not be enabled without an explicit option. + +This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. + +Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), +[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) +and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). + +#### Blocks + +For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. + +`EncodeAll` will encode all input in src and append it to dst. +This function can be called concurrently. +Each call will only run on a same goroutine as the caller. + +Encoded blocks can be concatenated and the result will be the combined input stream. +Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. + +Especially when encoding blocks you should take special care to reuse the encoder. +This will effectively make it run without allocations after a warmup period. +To make it run completely without allocations, supply a destination buffer with space for all content. + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a writer that caches compressors. +// For this operation type we supply a nil Reader. +var encoder, _ = zstd.NewWriter(nil) + +// Compress a buffer. +// If you have a destination buffer, the allocation in the call can also be eliminated. +func Compress(src []byte) []byte { + return encoder.EncodeAll(src, make([]byte, 0, len(src))) +} +``` + +You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` +option when creating the writer. + +Using the Encoder for both a stream and individual blocks concurrently is safe. + +### Performance + +I have collected some speed examples to compare speed and compression against other compressors. + +* `file` is the input file. +* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. +* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". +* `insize`/`outsize` is the input/output size. +* `millis` is the number of milliseconds used for compression. +* `mb/s` is megabytes (2^20 bytes) per second. 
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 +``` + +## Decompressor + +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. +There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. + +For streaming use a simple setup could look like this: + +```Go +import "github.com/klauspost/compress/zstd" + +func Decompress(in io.Reader, out io.Writer) error { + d, err := zstd.NewReader(in) + if err != nil { + return err + } + defer d.Close() + + // Copy content... + _, err = io.Copy(out, d) + return err +} +``` + +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. 
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. + +Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. +However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data +as it is being requested only. + +For decoding buffers, it could look something like this: + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a reader that caches decompressors. +// For this operation type we supply a nil Reader. +var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) + +// Decompress a buffer. We don't supply a destination buffer, +// so it will be allocated by the decoder. +func Decompress(src []byte) ([]byte, error) { + return decoder.DecodeAll(src, nil) +} +``` + +Both of these cases should provide the functionality needed. +The decoder can be used for *concurrent* decompression of multiple buffers. +By default 4 decompressors will be created. + +It will only allow a certain number of concurrent operations to run. +To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. + +### Dictionaries + +Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. + +Dictionaries are added individually to Decoders. +Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. +To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. +Several dictionaries can be added at once. + +The dictionary will be used automatically for the data that specifies them. +A re-used Decoder will still contain the dictionaries registered. + +When registering multiple dictionaries with the same ID, the last one will be used. + +It is possible to use dictionaries when compressing data. + +To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used +and it will likely be used even if it doesn't improve compression. + +The used dictionary must be used to decompress the content. + +For any real gains, the dictionary should be built with similar data. +If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. +Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. +For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). + +For now there is a fixed startup performance penalty for compressing content with dictionaries. +This will likely be improved over time. Just be aware to test performance when implementing. + +### Allocation-less operation + +The decoder has been designed to operate without allocations after a warmup. + +This means that you should *store* the decoder for best performance. +To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. +A decoder can safely be re-used even if the previous stream failed. + +To release the resources, you must call the `Close()` function on a decoder. +After this it can *no longer be reused*, but all running goroutines will be stopped. +So you *must* use this if you will no longer need the Reader. + +For decompressing smaller buffers a single decoder can be used. 
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. +In this case no unneeded allocations should be made. + +### Concurrency + +The buffer decoder does everything on the same goroutine and does nothing concurrently. +It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. + +The stream decoder will create goroutines that: + +1) Reads input and splits the input into blocks. +2) Decompression of literals. +3) Decompression of sequences. +4) Reconstruction of output stream. + +So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. + +The concurrency level will, for streams, determine how many blocks ahead the compression will start. + +Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. + +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + +### Benchmarks + +The first two are streaming decodes and the last are smaller inputs. + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. + +``` +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op +``` + +This reflects the performance around May 2022, but this may be out of date. + +## Zstd inside ZIP files + +It is possible to use zstandard to compress individual files inside zip archives. +While this isn't widely supported it can be useful for internal files. + +To support the compression and decompression of these files you must register a compressor and decompressor. + +It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT +use the global registration functions. The main reason for this is that 2 registrations from +different packages will result in a panic. 
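+
+A minimal sketch of per-archive registration (assuming the package's `ZipCompressor`/`ZipDecompressor` helpers and the `ZipMethodWinZip` method ID) could look like this:
+
+```Go
+import (
+	"archive/zip"
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// newZstdZipWriter registers the zstd compressor on this writer only,
+// avoiding the global registration that can panic if done twice.
+func newZstdZipWriter(w io.Writer) *zip.Writer {
+	zw := zip.NewWriter(w)
+	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
+	return zw
+}
+
+// newZstdZipReader registers the matching decompressor on a single reader.
+func newZstdZipReader(r io.ReaderAt, size int64) (*zip.Reader, error) {
+	zr, err := zip.NewReader(r, size)
+	if err != nil {
+		return nil, err
+	}
+	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
+	return zr, nil
+}
+```
+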
+ +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 0000000000..25ca983941 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,136 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + v := b.in[len(b.in)-8:] + b.in = b.in[:len(b.in)-8] + b.value = binary.LittleEndian.Uint64(v) + b.bitsRead = 0 +} + +// fill() will make sure at least 32 bits are available. 
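+// It reads 4 bytes from the tail of the remaining input when possible,
+// and falls back to consuming the last bytes one at a time.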
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if len(b.in) >= 4 { + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + return + } + + b.bitsRead -= uint8(8 * len(b.in)) + for len(b.in) > 0 { + b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) + b.in = b.in[:len(b.in)-1] + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return len(b.in) == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 0000000000..1952f175b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,112 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. 
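+// Values wider than 31 bits are written as two 32-bit halves with a flush32 in between.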
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 0000000000..9c28840c3b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,731 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. 
+ maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + + // Block is RLE, this is the size. + RLESize uint32 + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + } + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxCompressedBlockSizeAlloc + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSizeAlloc + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + compressedBlockOverAlloc + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } + case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. 
+ if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxCompressedBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to 0x%x, code: %v", symb, v) + } + case compModeFSE: + if debugDecoder { + println("Reading table for", tableIndex(i)) + } + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil 
+ } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 0000000000..32a7f401d5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,909 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. 
+type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. 
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. + if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 16 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + if err == nil && len(out)+5 > len(lits) { + // If we are close, we may still be worse or equal to raw. 
+ var lh literalsHeader + lh.setSizes(len(out), len(lits), single) + if len(out)+lh.size() >= len(lits) { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 6) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. 
+ bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 16 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + if err == nil && len(out)+5 > len(b.literals) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSize(len(b.literals)) + szRaw := lh.size() + lh.setSizes(len(out), len(b.literals), single) + szComp := lh.size() + if len(out)+szComp >= len(b.literals)+szRaw { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... + if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). 
+ nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... 
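// The FSE bit stream is consumed in reverse by the decoder, so sequences are
// emitted last-to-first here and read back first-to-last when decoding.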
+ for seq >= 0 {
+ s = b.sequences[seq]
+
+ ofB := ofTT[s.ofCode]
+ wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
+ //of.encode(ofB)
+ nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+ dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+ wr.addBits16NC(of.state, uint8(nbBitsOut))
+ of.state = of.stateTable[dstState]
+
+ // Accumulate extra bits.
+ outBits := ofB.outBits & 31
+ extraBits := uint64(s.offset & bitMask32[outBits])
+ extraBitsN := outBits
+
+ mlB := mlTT[s.mlCode]
+ //ml.encode(mlB)
+ nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+ dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+ wr.addBits16NC(ml.state, uint8(nbBitsOut))
+ ml.state = ml.stateTable[dstState]
+
+ outBits = mlB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ llB := llTT[s.llCode]
+ //ll.encode(llB)
+ nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+ dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+ wr.addBits16NC(ll.state, uint8(nbBitsOut))
+ ll.state = ll.stateTable[dstState]
+
+ outBits = llB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ wr.flush32()
+ wr.addBits64NC(extraBits, extraBitsN)
+
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
+ seq--
+ }
+ ml.flush(mlEnc.actualTableLog)
+ of.flush(ofEnc.actualTableLog)
+ ll.flush(llEnc.actualTableLog)
+ wr.close()
+ b.output = wr.out
+
+ // Maybe even add a bigger margin.
+ if len(b.output)-3-bhOffset >= b.size {
+ // Discard and encode as raw block.
+ b.output = b.encodeRawTo(b.output[:bhOffset], org)
+ b.popOffsets()
+ b.litEnc.Reuse = huff0.ReusePolicyNone
+ return nil
+ }
+
+ // Size is output minus block header.
+ bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+ if debugEncoder {
+ println("Rewriting block header", bh)
+ }
+ _ = bh.appendTo(b.output[bhOffset:bhOffset])
+ b.coders.setPrev(llEnc, mlEnc, ofEnc)
+ return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+ if len(b.sequences) == 0 {
+ // nothing to do
+ return
+ }
+ if len(b.sequences) > math.MaxUint16 {
+ panic("can only encode up to 64K sequences")
+ }
+ // No bounds checks after here:
+ llH := b.coders.llEnc.Histogram()
+ ofH := b.coders.ofEnc.Histogram()
+ mlH := b.coders.mlEnc.Histogram()
+ for i := range llH {
+ llH[i] = 0
+ }
+ for i := range ofH {
+ ofH[i] = 0
+ }
+ for i := range mlH {
+ mlH[i] = 0
+ }
+
+ var llMax, ofMax, mlMax uint8
+ for i := range b.sequences {
+ seq := &b.sequences[i]
+ v := llCode(seq.litLen)
+ seq.llCode = v
+ llH[v]++
+ if v > llMax {
+ llMax = v
+ }
+
+ v = ofCode(seq.offset)
+ seq.ofCode = v
+ ofH[v]++
+ if v > ofMax {
+ ofMax = v
+ }
+
+ v = mlCode(seq.matchLen)
+ seq.mlCode = v
+ mlH[v]++
+ if v > mlMax {
+ mlMax = v
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+ }
+ }
+ }
+ maxCount := func(a []uint32) int {
+ var max uint32
+ for _, v := range a {
+ if v > max {
+ max = v
+ }
+ }
+ return int(max)
+ }
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+ }
+ if debugAsserts && ofMax > maxOffsetBits {
+ panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+ }
+ if debugAsserts && llMax > maxLiteralLengthSymbol {
+ panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+ }
+
+ b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+ b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+ b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 0000000000..01a01e486e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer 
-type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 0000000000..55a388553d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. 
+ readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. + skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, io.ErrUnexpectedEOF + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := io.ReadFull(r.r, r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 0000000000..0e59a242d8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. 
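// For example, the little-endian bytes {0x78, 0x56, 0x34, 0x12} decode to
// 0x12345678.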
+func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 0000000000..6a5a2988b6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,261 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. 
+ // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. + // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
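//
// A minimal caller-side sketch (frameData is assumed to hold the start of a
// zstd frame; error handling shortened):
//
//	var h zstd.Header
//	remain, err := h.DecodeAndStrip(frameData)
//	if err != nil {
//		return err
//	}
//	fmt.Println("window:", h.WindowSize, "has FCS:", h.HasFCS, "first block OK:", h.FirstBlock.OK)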
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { + *h = Header{} + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return nil, ErrMagicMismatch + } + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return in[4:], nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return nil, errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return in, nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
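// The 3-byte block header is little-endian: bit 0 = Last_Block, bits 1-2 =
// Block_Type, and the remaining 21 bits hold Block_Size (for RLE blocks this
// is the regenerated size, not the single stored byte).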
+ cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return in, nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 0000000000..bbca17234a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,948 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + dicts map[uint32]*dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. 
It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. + d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... 
+ d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
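// The buffered decoders channel doubles as a semaphore: at most d.o.concurrent
// DecodeAll calls can hold a block decoder at a time, and the deferred function
// below returns it once decoding finishes.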
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + d.current.crc.Write(next.b) + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. 
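//
// A minimal caller-side sketch of the lifecycle (option values are illustrative):
//
//	dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
//	if err != nil {
//		return err
//	}
//	defer dec.Close()
//	out, err := dec.DecodeAll(compressed, nil)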
+func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 0000000000..774c5f00fe --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,169 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math/bits" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. 
+// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. +// Maximum is 1 << 63 bytes. Default is 64GiB. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, d) + } + return nil + } +} + +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. 
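//
// A minimal sketch (the 1 MiB cap is illustrative):
//
//	dec, _ := zstd.NewReader(nil, zstd.WithDecodeAllCapLimit(true))
//	dst := make([]byte, 0, 1<<20) // allow at most 1 MiB of output
//	out, err := dec.DecodeAll(compressed, dst)
//	// err is ErrDecoderSizeExceeded if the frame would not fit in cap(dst).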
+func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 0000000000..b7b83164bc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,565 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, fmt.Errorf("loading literal table: %w", err) + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} + +type BuildDictOptions struct { + // Dictionary ID. + ID uint32 + + // Content to use to create dictionary tables. + Contents [][]byte + + // History to use for all blocks. + History []byte + + // Offsets to use. + Offsets [3]int + + // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. + // See https://github.com/facebook/zstd/issues/3724 + CompatV155 bool + + // Use the specified encoder level. + // The dictionary will be built using the specified encoder level, + // which will reflect speed and make the dictionary tailored for that level. + // If not set SpeedBestCompression will be used. + Level EncoderLevel + + // DebugOut will write stats and other details here if set. + DebugOut io.Writer +} + +func BuildDict(o BuildDictOptions) ([]byte, error) { + initPredefined() + hist := o.History + contents := o.Contents + debug := o.DebugOut != nil + println := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintln(o.DebugOut, args...) + } + } + printf := func(s string, args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintf(o.DebugOut, s, args...) + } + } + print := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprint(o.DebugOut, args...) 
+ } + } + + if int64(len(hist)) > dictMaxLength { + return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) + } + if len(hist) < 8 { + return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) + } + if len(contents) == 0 { + return nil, errors.New("no content provided") + } + d := dict{ + id: o.ID, + litEnc: nil, + llDec: sequenceDec{}, + ofDec: sequenceDec{}, + mlDec: sequenceDec{}, + offsets: o.Offsets, + content: hist, + } + block := blockEnc{lowMem: false} + block.init() + enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) + if o.Level != 0 { + eOpts := encoderOptions{ + level: o.Level, + blockSize: maxMatchLen, + windowSize: maxMatchLen, + dict: &d, + lowMem: false, + } + enc = eOpts.encoder() + } else { + o.Level = SpeedBestCompression + } + var ( + remain [256]int + ll [256]int + ml [256]int + of [256]int + ) + addValues := func(dst *[256]int, src []byte) { + for _, v := range src { + dst[v]++ + } + } + addHist := func(dst *[256]int, src *[256]uint32) { + for i, v := range src { + dst[i] += int(v) + } + } + seqs := 0 + nUsed := 0 + litTotal := 0 + newOffsets := make(map[uint32]int, 1000) + for _, b := range contents { + block.reset(nil) + if len(b) < 8 { + continue + } + nUsed++ + enc.Reset(&d, true) + enc.Encode(&block, b) + addValues(&remain, block.literals) + litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } + seqs += len(block.sequences) + block.genCodes() + addHist(&ll, block.coders.llEnc.Histogram()) + addHist(&ml, block.coders.mlEnc.Histogram()) + addHist(&of, block.coders.ofEnc.Histogram()) + for i, seq := range block.sequences { + if i > 3 { + break + } + offset := seq.offset + if offset == 0 { + continue + } + if int(offset) >= len(o.History) { + continue + } + if offset > 3 { + newOffsets[offset-3]++ + } else { + newOffsets[uint32(o.Offsets[offset-1])]++ + } + } + } + // Find most used offsets. + var sortedOffsets []uint32 + for k := range newOffsets { + sortedOffsets = append(sortedOffsets, k) + } + sort.Slice(sortedOffsets, func(i, j int) bool { + a, b := sortedOffsets[i], sortedOffsets[j] + if a == b { + // Prefer the longer offset + return sortedOffsets[i] > sortedOffsets[j] + } + return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] + }) + if len(sortedOffsets) > 3 { + if debug { + print("Offsets:") + for i, v := range sortedOffsets { + if i > 20 { + break + } + printf("[%d: %d],", v, newOffsets[v]) + } + println("") + } + + sortedOffsets = sortedOffsets[:3] + } + for i, v := range sortedOffsets { + o.Offsets[i] = int(v) + } + if debug { + println("New repeat offsets", o.Offsets) + } + + if nUsed == 0 || seqs == 0 { + return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) + } + if debug { + println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) + } + if seqs/nUsed < 512 { + // Use 512 as minimum. + nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } + } + copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { + hist := dst.Histogram() + var maxSym uint8 + var maxCount int + var fakeLength int + for i, v := range src { + if v > 0 { + v = v / nUsed + if v == 0 { + v = 1 + } + } + if v > maxCount { + maxCount = v + } + if v != 0 { + maxSym = uint8(i) + } + fakeLength += v + hist[i] = uint32(v) + } + + // Ensure we aren't trying to represent RLE. 
+ if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + + dst.HistogramFinished(maxSym, maxCount) + dst.reUsed = false + dst.useRLE = false + err := dst.normalizeCount(fakeLength) + if err != nil { + return nil, err + } + if debug { + println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) + } + return dst.writeCount(nil) + } + if debug { + print("Literal lengths: ") + } + llTable, err := copyHist(block.coders.llEnc, &ll) + if err != nil { + return nil, err + } + if debug { + print("Match lengths: ") + } + mlTable, err := copyHist(block.coders.mlEnc, &ml) + if err != nil { + return nil, err + } + if debug { + print("Offsets: ") + } + ofTable, err := copyHist(block.coders.ofEnc, &of) + if err != nil { + return nil, err + } + + // Literal table + avgSize := litTotal + if avgSize > huff0.BlockSizeMax/2 { + avgSize = huff0.BlockSizeMax / 2 + } + huffBuff := make([]byte, 0, avgSize) + // Target size + div := litTotal / avgSize + if div < 1 { + div = 1 + } + if debug { + println("Huffman weights:") + } + for i, n := range remain[:] { + if n > 0 { + n = n / div + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + if debug { + printf("[%d: %d], ", i, n) + } + } + } + if o.CompatV155 && remain[255]/div == 0 { + huffBuff = append(huffBuff, 255) + } + scratch := &huff0.Scratch{TableLog: 11} + for tries := 0; tries < 255; tries++ { + scratch = &huff0.Scratch{TableLog: 11} + _, _, err = huff0.Compress1X(huffBuff, scratch) + if err == nil { + break + } + if debug { + printf("Try %d: Huffman error: %v\n", tries+1, err) + } + huffBuff = huffBuff[:0] + if tries == 250 { + if debug { + println("Huffman: Bailing out with predefined table") + } + + // Bail out.... Just generate something + huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) + for i := 0; i < 128; i++ { + huffBuff = append(huffBuff, byte(i)) + } + continue + } + if errors.Is(err, huff0.ErrIncompressible) { + // Try truncating least common. + for i, n := range remain[:] { + if n > 0 { + n = n / (div * (i + 1)) + if n > 0 { + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { + huffBuff = append(huffBuff, 255) + } + if len(huffBuff) == 0 { + huffBuff = append(huffBuff, 0, 255) + } + } + if errors.Is(err, huff0.ErrUseRLE) { + for i, n := range remain[:] { + n = n / (div * (i + 1)) + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
+ } + } + } + + var out bytes.Buffer + out.Write([]byte(dictMagic)) + out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) + out.Write(scratch.OutTable) + if debug { + println("huff table:", len(scratch.OutTable), "bytes") + println("of table:", len(ofTable), "bytes") + println("ml table:", len(mlTable), "bytes") + println("ll table:", len(llTable), "bytes") + } + out.Write(ofTable) + out.Write(mlTable) + out.Write(llTable) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) + out.Write(hist) + if debug { + _, err := loadDict(out.Bytes()) + if err != nil { + panic(err) + } + i, err := InspectDictionary(out.Bytes()) + if err != nil { + panic(err) + } + println("ID:", i.ID()) + println("Content size:", i.ContentSize()) + println("Encoder:", i.LitEncoder() != nil) + println("Offsets:", i.Offsets()) + var totalSize int + for _, b := range contents { + totalSize += len(b) + } + + encWith := func(opts ...EOption) int { + enc, err := NewWriter(nil, opts...) + if err != nil { + panic(err) + } + defer enc.Close() + var dst []byte + var totalSize int + for _, b := range contents { + dst = enc.EncodeAll(b, dst[:0]) + totalSize += len(dst) + } + return totalSize + } + plain := encWith(WithEncoderLevel(o.Level)) + withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) + println("Input size:", totalSize) + println("Plain Compressed:", plain) + println("Dict Compressed:", withDict) + println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 0000000000..5ca46038ad --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,173 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + bufferReset int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. 
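The exported dictionary helpers completed above (BuildDictOptions, BuildDict, InspectDictionary) can be exercised roughly as follows. A minimal sketch under stated assumptions: the sample payloads, the dictionary ID and the {1, 4, 8} starting offsets (the format's default repeat offsets) are illustrative, and real use would feed many more samples:

package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Sample payloads the dictionary should be tuned for (illustrative).
	samples := [][]byte{
		[]byte(`{"level":"info","msg":"request served","status":200}`),
		[]byte(`{"level":"warn","msg":"request served","status":429}`),
		[]byte(`{"level":"info","msg":"request served","status":204}`),
	}

	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       0x11223344,      // illustrative; must be non-zero
		Contents: samples,         // blocks used to build the tables
		History:  samples[0],      // shared history, must be at least 8 bytes
		Offsets:  [3]int{1, 4, 8}, // the format's default repeat offsets
		Level:    zstd.SpeedBestCompression,
		DebugOut: os.Stderr, // optional stats while building
	})
	if err != nil {
		panic(err)
	}

	// InspectDictionary parses the serialized dictionary back.
	info, err := zstd.InspectDictionary(dict)
	if err != nil {
		panic(err)
	}
	fmt.Println("dict id:", info.ID(), "content bytes:", info.ContentSize())
}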
+func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// ensureHist will ensure that history can keep at least this many bytes. +func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{lowMem: e.lowMem} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + e.blk.dictLitEnc = nil + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true + } + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) + e.lowMem = low + } + + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < e.bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 0000000000..4613724e9d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,560 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = maxMatchLen * 8 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep) & 3) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
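bestFastEncoder above backs the strongest exported compression level rather than being used directly. A minimal sketch of reaching it through the public options; the input data is illustrative, and WithLowerEncoderMem corresponds to the lowMem flag threaded through fastBase:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// SpeedBestCompression selects the two-table match finder in this file;
	// WithLowerEncoderMem trades speed for smaller history buffers, the same
	// knob as the lowMem flag on fastBase.
	var out bytes.Buffer
	enc, err := zstd.NewWriter(&out,
		zstd.WithEncoderLevel(zstd.SpeedBestCompression),
		zstd.WithLowerEncoderMem(true),
	)
	if err != nil {
		panic(err)
	}
	input := bytes.NewReader(bytes.Repeat([]byte("abcd1234"), 4096))
	if _, err := io.Copy(enc, input); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Println("compressed to", out.Len(), "bytes")
}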
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + const goodEnough = 250 + + cv := load6432(src, s) + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { + return + } + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if m.rep <= 0 { + // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. 
+ tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } + } + + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) + + if canRepeat && best.length < goodEnough { + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + continue + } + + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) + // Long at s+1, s+2 + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) + if false { + // Short at s+3. + // Too often worse... + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) + } + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. 
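The sequences emitted by this encoder store offsets in the zstd offset_value convention: values 1 to 3 select one of the three recent offsets kept in offset1..offset3, while a new match stores its distance plus three (hence seq.offset = uint32(s-t) + 3 further down). A small sketch of that mapping with a hypothetical helper name, for illustration only:

package main

import "fmt"

// seqOffsetValue illustrates the value stored in seq.offset: a repCode of
// 1, 2 or 3 names one of the recent offsets (the exact slot depends on
// whether the sequence carries literals), anything else stores the raw
// match distance shifted by +3. Hypothetical helper, for illustration only.
func seqOffsetValue(distance, repCode uint32) uint32 {
	if repCode >= 1 && repCode <= 3 {
		return repCode // repeat: the decoder takes the distance from its history
	}
	return distance + 3 // new offset: the decoder recovers distance as value-3
}

func main() {
	fmt.Println(seqOffsetValue(100, 0)) // 103, meaning "distance 100"
	fmt.Println(seqOffsetValue(0, 1))   // 1, meaning "most recent offset"
}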
+ if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } + } + } + } + } + + if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + s = best.s + if best.rep > 0 { + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + addLiterals(&seq, best.s) + + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) + if debugSequences { + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) + } + blk.sequences = append(blk.sequences, seq) + + // Index old s + 1 -> s - 1 + s = best.s + best.length + nextEmit = s + + // Index skipped... + end := s + if s > sLimit+4 { + end = sLimit + 4 + } + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + + switch best.rep { + case 2, 4 | 1: + offset1, offset2 = offset2, offset1 + case 3, 4 | 2: + offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 + } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Write our sequence + var seq seq + l := best.length + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 + } + + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + off++ + } + if s >= sLimit { + break encodeLoop + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git 
a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 0000000000..84a79fde76 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1252 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
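The dictionary-aware paths in these encoders (betterFastEncoderDict above, and the Reset variants that accept a non-nil dict) are reached through the exported dictionary options. A minimal round-trip sketch, assuming dictBytes holds a serialized dictionary such as the output of BuildDict:

package example

import "github.com/klauspost/compress/zstd"

// roundTrip compresses and decompresses data with a shared dictionary.
// dictBytes is assumed to be a serialized zstd dictionary, for example the
// output of BuildDict above.
func roundTrip(data, dictBytes []byte) ([]byte, error) {
	enc, err := zstd.NewWriter(nil,
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
		zstd.WithEncoderDict(dictBytes), // selects the dict-aware encoder and Reset(d, ...) path
	)
	if err != nil {
		return nil, err
	}
	compressed := enc.EncodeAll(data, nil)
	enc.Close()

	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dictBytes))
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(compressed, nil)
}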
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
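The repeat check near the top of the search loop, load3232(src, repIndex) == uint32(cv>>(repOff*8)), is a 4-byte compare one position ahead of s against the same position minus the last offset. A standalone sketch of the same test, assuming the package's load helpers are plain little-endian loads; the function name and sample input are illustrative:

package main

import (
	"encoding/binary"
	"fmt"
)

// repeatCandidate reports whether the 4 bytes at s+1 equal the 4 bytes at
// s+1-offset1, i.e. whether reusing the most recent offset is worth trying.
// Standalone restatement of the check above, name chosen for illustration.
func repeatCandidate(src []byte, s, offset1 int) bool {
	repIndex := s - offset1 + 1
	if repIndex < 0 || s+5 > len(src) {
		return false
	}
	cur := binary.LittleEndian.Uint32(src[s+1:])
	old := binary.LittleEndian.Uint32(src[repIndex:])
	return cur == old
}

func main() {
	src := []byte("abcdefgh_abcdefgh_abcdefgh")
	fmt.Println(repeatCandidate(src, 9, 9)) // true: the data repeats every 9 bytes
}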
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. 
+// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. 
+ start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + 
for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 0000000000..d36be7bd8c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1123 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. 
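
// Illustrative sketch, not part of the vendored source: the effect of the skip
// formula used in the search loop further down,
// s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)).
// The further the scan position gets from the last emitted literal without a
// match, the larger each skip becomes, so incompressible regions are crossed
// quickly. Constants mirror this encoder (stepSize = 1, kSearchStrength = 8).
package main

import "fmt"

func main() {
	const (
		stepSize        = 1
		kSearchStrength = 8
	)
	nextEmit := 0
	for _, s := range []int{0, 128, 512, 2048, 8192} {
		skip := stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
		fmt.Printf("gap %5d bytes without a match -> advance %3d bytes per iteration\n", s-nextEmit, skip)
	}
}
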
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
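
// Illustrative sketch, not part of the vendored source: how these encoders pack
// a match distance into seq.offset. Values 1-3 are reserved as repeat-offset
// codes referring to recently used distances (their exact mapping also depends
// on the literal length and is resolved when the block is serialized); anything
// above 3 carries the raw distance plus 3, matching the
// `seq.offset = uint32(s-t) + 3` assignments in this file.
package main

import "fmt"

func describe(offsetCode uint32) string {
	if offsetCode <= 3 {
		return fmt.Sprintf("repeat-offset code %d (reuse a recent distance)", offsetCode)
	}
	return fmt.Sprintf("explicit distance %d", offsetCode-3)
}

func main() {
	fmt.Println(describe(1))      // the rep-0 case used above when litLen is 0
	fmt.Println(describe(42 + 3)) // a freshly found match 42 bytes back
}
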
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
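
// Illustrative sketch, not part of the vendored source: the rebase performed by
// the loop below. Table entries hold offsets relative to e.cur; when e.cur
// approaches the wraparound limit, every entry still within maxMatchOff of the
// end of history is shifted so e.cur can restart at maxMatchOff, and anything
// older is zeroed (treated as "no candidate"). Constant values here are
// examples only.
package main

import "fmt"

func rebase(v, cur, histLen, maxMatchOff int32) int32 {
	minOff := cur + histLen - maxMatchOff
	if v < minOff {
		return 0 // too far back to ever match again
	}
	return v - cur + maxMatchOff
}

func main() {
	const (
		cur         = int32(1 << 30)
		histLen     = int32(1 << 20)
		maxMatchOff = int32(1 << 23) // example window size
	)
	fmt.Println(rebase(cur+histLen-10, cur, histLen, maxMatchOff))  // recent entry survives, shifted
	fmt.Println(rebase(cur-maxMatchOff, cur, histLen, maxMatchOff)) // stale entry dropped
}
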
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
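
// Illustrative sketch, not part of the vendored source: the shard-dirty scheme
// used by the *Dict encoders. Every hash-table write marks its shard dirty
// (markShardDirty / markLongShardDirty); on Reset the dictionary snapshot is
// restored either shard-by-shard or, when most shards are dirty anyway, with a
// single full copy. Shard counts and the "mostly dirty" threshold below are
// simplified stand-ins for the real constants.
package main

import "fmt"

const (
	shardCnt  = 8
	shardSize = 4
)

type shardedTable struct {
	table    [shardCnt * shardSize]int
	snapshot [shardCnt * shardSize]int
	dirty    [shardCnt]bool
}

func (t *shardedTable) write(idx, val int) {
	t.table[idx] = val
	t.dirty[idx/shardSize] = true
}

func (t *shardedTable) reset() {
	dirtyCnt := 0
	for _, d := range t.dirty {
		if d {
			dirtyCnt++
		}
	}
	if dirtyCnt > shardCnt/2 {
		copy(t.table[:], t.snapshot[:]) // cheaper than many partial copies
	} else {
		for i, d := range t.dirty {
			if d {
				copy(t.table[i*shardSize:(i+1)*shardSize], t.snapshot[i*shardSize:(i+1)*shardSize])
			}
		}
	}
	for i := range t.dirty {
		t.dirty[i] = false
	}
}

func main() {
	var t shardedTable
	t.write(5, 42)
	t.reset()
	fmt.Println(t.table[5]) // restored from the snapshot: 0
}
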
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 0000000000..f45a3da7da --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,891 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
+ maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
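
// Illustrative sketch, not part of the vendored source: the backward extension
// done just below. A repeat match found at `start` is grown backwards while the
// preceding bytes also match, but never past nextEmit+1, so the sequence always
// keeps at least one literal and the zero-literal special case is avoided. The
// real loops additionally clamp repIndex to the window and cap the match length.
package main

import "fmt"

func extendBackwards(src []byte, start, repIndex, nextEmit int) (newStart, extra int) {
	startLimit := nextEmit + 1
	for repIndex > 0 && start > startLimit && src[repIndex-1] == src[start-1] {
		repIndex--
		start--
		extra++
	}
	return start, extra
}

func main() {
	src := []byte("abcXYZabcXYZ")
	// Suppose a repeat match was found at position 7 against position 1.
	start, extra := extendBackwards(src, 7, 1, 5)
	fmt.Println("match start moved to", start, "gaining", extra, "bytes")
}
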
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxCompressedBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [tableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 2 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 0000000000..a79c4a527c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,622 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
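
// Illustrative sketch, not part of the vendored source: typical use of this
// Encoder, either as a streaming io.WriteCloser or via EncodeAll for small,
// independent payloads, as the doc comment above suggests.
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Streaming: everything written to enc is compressed into buf; Close
	// flushes the final block and the checksum if enabled.
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(enc, strings.NewReader("hello zstd")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Println("stream frame bytes:", buf.Len())

	// One-shot: EncodeAll appends a complete frame to the dst slice.
	// A nil writer is fine when the encoder is only used this way.
	oneShot, _ := zstd.NewWriter(nil)
	frame := oneShot.EncodeAll([]byte("hello zstd"), nil)
	fmt.Println("EncodeAll frame bytes:", len(frame))
}
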
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst := fh.appendTo(tmp[:0]) + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. 
+ enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. 
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst = fh.appendTo(dst) + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + oldout := blk.output + // Output directly to dst + blk.output = dst + + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = blk.output + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = append(dst, blk.output...) + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + var err error + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. 
+ // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. + maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 0000000000..20671dcb91 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,339 @@ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + customBlockSize bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: maxCompressedBlockSize, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: false, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. +func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. +// By default this will be set to GOMAXPROCS. 
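+//
+// A minimal sketch of forcing synchronous, single-goroutine stream compression
+// (illustrative only; w is an assumed io.Writer):
+//
+//	enc, err := NewWriter(w, WithEncoderConcurrency(1))
+//	if err != nil {
+//		// handle the error
+//	}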
+func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level and max 8MB. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + o.customBlockSize = true + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. +func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + n = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. +// Only use the constants made available, since the actual mapping +// of these values are very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + + // speedLast should be kept as the last actual compression option. + // The is not for external usage, but is used to keep track of the valid options. + speedLast +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The compare is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. 
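+//
+// A small sketch of wiring a user-supplied level name to an encoder option
+// (illustrative only; w is an assumed io.Writer; the ignored bool reports
+// whether the name was recognized, SpeedDefault being the fallback):
+//
+//	_, level := EncoderLevelFromString("fastest")
+//	enc, err := NewWriter(w, WithEncoderLevel(level))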
+func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + default: + return SpeedBestCompression + } +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 8 << 20 + case SpeedBestCompression: + o.windowSize = 8 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedDefault + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. 
+// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} + +// WithLowerEncoderMem will trade in some memory cases trade less memory usage for +// slower encoding speed. +// This will not change the window size which is the primary function for reducing +// memory usage. See WithWindowSize. +func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + +// WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. +func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 0000000000..e47af66e7c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,415 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "io" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc *xxhash.Digest + + WindowSize uint64 + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + + DictionaryID uint32 + HasCheckSum bool + SingleSegment bool +} + +const ( + // MinWindowSize is the minimum Window Size, which is 1 KB. + MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. + MaxWindowSize = 1 << 29 +) + +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" +) + +func newFrameDec(o decoderOptions) *frameDec { + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize + } + d := frameDec{ + o: o, + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. 
+func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + case nil: + signature[0] = b[0] + default: + return err + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + case nil: + copy(signature[1:], b) + default: + return err + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) + } + // Break if not skippable frame. + break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = 0 + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch len(b) { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + d.DictionaryID = id + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum, assuming the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC skips over the checksum, assuming the frame has one. +func (d *frameDec) consumeCRC() error { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + } + return err +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. 
+ d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. + crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 0000000000..667ca06794 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) []byte { + dst = append(dst, frameMagic...) 
+ var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, framessizes < 256 are not stored. + case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. +func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 0000000000..2f8860a722 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,307 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++
+ previous0 = count == 0
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> (bitCount & 31)
+ } else {
+ bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+ b.off = len(b.b) - 4
+ bitStream = b.Uint32() >> (bitCount & 31)
+ }
+ }
+ s.symbolLen = charnum
+ if s.symbolLen <= 1 {
+ return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+ }
+ if s.symbolLen > maxSymbolValue+1 {
+ return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+ }
+ if remaining != 1 {
+ return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+ }
+ if bitCount > 32 {
+ return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+ }
+ if gotTotal != 1<<s.actualTableLog {
+ return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+ }
+ b.advance((bitCount + 7) >> 3)
+ return s.buildDtable()
+}
+
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ // dt [maxTablesize]decSymbol // Decompression table.
+ // symbolLen uint16 // Length of active part of the symbol table.
+ // actualTableLog uint8 // Selected tablelog.
+ // maxBits uint8 // Maximum number of additional bits
+ // // used for table creation to avoid allocations.
+ // stateTable [256]uint16
+ // norm [maxSymbolValue + 1]int16
+ // preDefined bool
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
+// decSymbol contains information about a state entry,
+// Including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+ return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+ return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+ return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+ return uint16(d >> 16)
+}
+
+func (d decSymbol) baselineInt() int {
+ return int(d >> 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+ const mask = 0xffffffffffffff00
+ *d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+ const mask = 0xffffffffffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+ const mask = 0xffffffff0000ffff
+ *d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+ const mask = 0xffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+ if int(symb) >= len(t) {
+ return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+ }
+ lu := t[symb]
+ return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder til RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 0000000000..d04a829b0a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 0000000000..bcde398695 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 0000000000..8adfebb029 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,73 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + v = 1 + } + symbolNext[i] = uint16(v) + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + for { + // lowprob area + position = (position + step) & tableMask + if position <= highThreshold { + break + } + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 0000000000..ab26326a8f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,701 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. 
+func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
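The normalizeCount step that follows rescales the raw histogram so that the weights sum to exactly 1<<tableLog, with -1 marking symbols that are present but so rare they are pinned to a single table cell. Here is a minimal check of that invariant, using the predefined literal-length distribution that appears later in this diff; checkNorm is an illustrative helper, not part of the vendored encoder.

```go
package main

import "fmt"

// checkNorm verifies the invariant the encoder establishes: the absolute
// weights of all present symbols sum to exactly 1<<tableLog, where a
// weight of -1 still claims one cell of the table.
func checkNorm(norm []int16, tableLog uint8) error {
	total := 0
	for _, v := range norm {
		switch {
		case v > 0:
			total += int(v)
		case v < 0:
			total++ // low-probability symbol: one cell
		}
	}
	if total != 1<<tableLog {
		return fmt.Errorf("normalized counts sum to %d, want %d", total, 1<<tableLog)
	}
	return nil
}

func main() {
	// Predefined literal-length distribution (tableLog 6) from the zstd spec.
	norm := []int16{
		4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
		2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
		-1, -1, -1, -1,
	}
	fmt.Println(checkNorm(norm, 6)) // <nil>
}
```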
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. +func (s *fseEncoder) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2 + + // Write Table Size + bitStream = uint32(tableLog - minEncTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + outP = len(out) + ) + if cap(out) < outP+maxHeaderSize { + out = append(out, make([]byte, maxHeaderSize*3)...) + out = out[:len(out)-maxHeaderSize*3] + } + out = out[:outP+maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
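The deltaNbBits values prepared in buildCTable and probed by bitCost above are a fixed-point trick: for a symbol with normalized count v, the encoder later computes nbBits = (state + deltaNbBits) >> 16, which lands on either maxBitsOut or maxBitsOut-1 depending on where the state sits, approximating the fractional cost tableLog - log2(v). The following self-contained sketch of that arithmetic is valid for v >= 2 (v of 1 and -1 are special-cased in the real code); nbBitsFor is an illustrative helper.

```go
package main

import (
	"fmt"
	"math/bits"
)

// nbBitsFor mirrors the deltaNbBits construction:
//   maxBitsOut  = tableLog - floor(log2(v-1))
//   deltaNbBits = (maxBitsOut << 16) - (v << maxBitsOut)
// States live in [tableSize, 2*tableSize), and the shift by 16 picks
// maxBitsOut for high states and maxBitsOut-1 for low ones.
func nbBitsFor(state, v uint32, tableLog uint8) uint32 {
	maxBitsOut := uint32(tableLog) - uint32(bits.Len32(v-1)-1)
	deltaNbBits := (maxBitsOut << 16) - (v << maxBitsOut)
	return (state + deltaNbBits) >> 16
}

func main() {
	const tableLog = 6 // 64-state table
	const v = 5        // symbol occupies 5 of the 64 cells
	for _, state := range []uint32{64, 79, 80, 127} {
		fmt.Printf("state %3d -> %d bits\n", state, nbBitsFor(state, v, tableLog))
	}
	// state  64 -> 3 bits
	// state  79 -> 3 bits
	// state  80 -> 4 bits
	// state 127 -> 4 bits
}
```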
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 0000000000..474cb77d2b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
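The fillBase helper that follows builds the baseline/extra-bits tables from the format spec: each code maps to a base value plus a number of extra bits read from the stream, and consecutive bases are spaced 1<<bits apart. Below is a small sketch of the literal-length variant, mirroring the values passed to fillBase in initPredefined further down; buildLL and the decode in main are illustrative only.

```go
package main

import "fmt"

type baseOffset struct {
	baseLine uint32
	addBits  uint8
}

// buildLL mirrors the literal-length setup: codes 0-15 are the length
// itself, codes 16-35 add extra bits on top of a growing baseline.
func buildLL() []baseOffset {
	tab := make([]baseOffset, 36)
	for i := 0; i < 16; i++ {
		tab[i] = baseOffset{baseLine: uint32(i)}
	}
	base := uint32(16)
	for i, bits := range []uint8{1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} {
		tab[16+i] = baseOffset{baseLine: base, addBits: bits}
		base += 1 << bits
	}
	return tab
}

func main() {
	ll := buildLL()
	// Code 18 has baseline 20 and 1 extra bit, so an extra-bit value of 1
	// decodes to a literal length of 21.
	code, extra := 18, uint32(1)
	fmt.Println("baseline:", ll[code].baseLine, "addBits:", ll[code].addBits,
		"decoded:", ll[code].baseLine+extra)
}
```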
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 0000000000..5d73c21ebd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 0000000000..09164856d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. 
+ // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 0000000000..24b53065f4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 0000000000..777290d44c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. 
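The README above lists the vendored xxhash API. This copy sits under an internal path, so code outside this module would normally import the upstream github.com/cespare/xxhash/v2 package instead; assuming that module, a minimal usage sketch of the one-shot and streaming forms looks like this.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello world")))

	// Streaming: Digest implements hash.Hash64, so the same bytes fed in
	// chunks produce the same 64-bit value as the one-shot call.
	d := xxhash.New()
	d.WriteString("hello ")
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}
```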
+ +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 0000000000..fc40c82001 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
+ c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 0000000000..ddb63aa91b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 0000000000..ae7d4d3295 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(s *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD s+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 0000000000..d4221edf4f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 0000000000..0be16cefc7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 0000000000..6f3b0cb102 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 0000000000..f41932b7a4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+ +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 0000000000..0782b86e3d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,66 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SHRL $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 0000000000..57b9c31c02 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 0000000000..d7fe6d82d9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,503 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. 
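The generic matchLen above counts the common prefix eight bytes at a time: XOR two little-endian words, and if the result is non-zero, the number of equal leading bytes is the count of trailing zero bits divided by eight. A tiny worked example of that core step follows; firstDiff is a local helper, not the vendored function.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// firstDiff compares one 8-byte word of a and b and returns how many
// leading bytes match, using the XOR + trailing-zeros trick.
func firstDiff(a, b []byte) int {
	diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	if diff == 0 {
		return 8 // all eight bytes equal; the real loop then advances by 8
	}
	return bits.TrailingZeros64(diff) >> 3
}

func main() {
	a := []byte("abcdeXgh")
	b := []byte("abcdeYgh")
	fmt.Println(firstDiff(a, b)) // 5: the inputs agree on "abcde"
}
```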
+ llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. +func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1< cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history. 
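The execute loop above expands sequences: each sequence copies ll literals from the literal buffer, then ml bytes starting mo bytes back in the output produced so far (reaching into the history window or the dictionary when needed), and any literals left after the last sequence are appended at the end. Here is a toy version of that expansion, ignoring history, dictionaries and all bounds checking, which shows why overlapping matches (mo < ml) must be copied byte by byte.

```go
package main

import "fmt"

type seqVal struct{ ll, ml, mo int }

// executeSeqs interleaves literal runs with back-references into the
// output generated so far. An overlapping copy repeats a short pattern.
func executeSeqs(literals []byte, seqs []seqVal) []byte {
	var out []byte
	for _, s := range seqs {
		out = append(out, literals[:s.ll]...)
		literals = literals[s.ll:]
		start := len(out) - s.mo
		for i := 0; i < s.ml; i++ {
			out = append(out, out[start+i])
		}
	}
	return append(out, literals...) // trailing literals after the last sequence
}

func main() {
	// One literal run "abc", then a 9-byte match 3 bytes back, then "!".
	out := executeSeqs([]byte("abc!"), []seqVal{{ll: 3, ml: 9, mo: 3}})
	fmt.Printf("%s\n", out) // abcabcabcabc!
}
```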
+func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs + startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) 
+ s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
+ br.fill() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fill() + } + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 0000000000..c59f17e07a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,394 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + "io" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. 
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. + const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorOverread: + return true, io.ErrUnexpectedEOF + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
+ if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exeeceds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// error reported when bits are overread. +const errorOverread = 6 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history. 
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. code:", errCode) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 0000000000..f5591fa1e8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4151 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + 
MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 
152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + 
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + 
+sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB 
R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 
+ JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + 
MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + 
ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 
32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal 
Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + 
+copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 
24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + 
JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + 
+copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + +copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 
+ JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ -8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 
2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error 
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 0000000000..2fb35b788c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 0000000000..8014174a77 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 0000000000..ec13594e89 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,434 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + var n int + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 0000000000..29c15c8c4e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 0000000000..4be7cc7367 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,121 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) + } +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/kylelemons/godebug/LICENSE b/vendor/github.com/kylelemons/godebug/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
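The kylelemons/godebug module enters the vendor tree for its `diff` package, whose source follows next. As a quick orientation, here is a minimal, hypothetical sketch of calling the exported `Diff` helper; the input strings are invented, and only `diff.Diff` and its `+`/`-`/` ` line prefixes come from the vendored file below:

```go
package main

import (
	"fmt"

	"github.com/kylelemons/godebug/diff"
)

func main() {
	before := "alpha\nbeta\ngamma"
	after := "alpha\ngamma\ndelta"

	// Diff returns a unified line-by-line diff; each output line is
	// prefixed with '+', '-', or ' ' for added, removed, or unchanged.
	fmt.Println(diff.Diff(before, after))
}
```

Internally the work is done by `DiffChunks`, which implements the shortest-edit-script algorithm referenced in the comments of the file below.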
diff --git a/vendor/github.com/kylelemons/godebug/diff/diff.go b/vendor/github.com/kylelemons/godebug/diff/diff.go new file mode 100644 index 0000000000..200e596c62 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/diff/diff.go @@ -0,0 +1,186 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package diff implements a linewise diff algorithm. +package diff + +import ( + "bytes" + "fmt" + "strings" +) + +// Chunk represents a piece of the diff. A chunk will not have both added and +// deleted lines. Equal lines are always after any added or deleted lines. +// A Chunk may or may not have any lines in it, especially for the first or last +// chunk in a computation. +type Chunk struct { + Added []string + Deleted []string + Equal []string +} + +func (c *Chunk) empty() bool { + return len(c.Added) == 0 && len(c.Deleted) == 0 && len(c.Equal) == 0 +} + +// Diff returns a string containing a line-by-line unified diff of the linewise +// changes required to make A into B. Each line is prefixed with '+', '-', or +// ' ' to indicate if it should be added, removed, or is correct respectively. +func Diff(A, B string) string { + aLines := strings.Split(A, "\n") + bLines := strings.Split(B, "\n") + + chunks := DiffChunks(aLines, bLines) + + buf := new(bytes.Buffer) + for _, c := range chunks { + for _, line := range c.Added { + fmt.Fprintf(buf, "+%s\n", line) + } + for _, line := range c.Deleted { + fmt.Fprintf(buf, "-%s\n", line) + } + for _, line := range c.Equal { + fmt.Fprintf(buf, " %s\n", line) + } + } + return strings.TrimRight(buf.String(), "\n") +} + +// DiffChunks uses an O(D(N+M)) shortest-edit-script algorithm +// to compute the edits required from A to B and returns the +// edit chunks. +func DiffChunks(a, b []string) []Chunk { + // algorithm: http://www.xmailserver.org/diff2.pdf + + // We'll need these quantities a lot. + alen, blen := len(a), len(b) // M, N + + // At most, it will require len(a) deletions and len(b) additions + // to transform a into b. + maxPath := alen + blen // MAX + if maxPath == 0 { + // degenerate case: two empty lists are the same + return nil + } + + // Store the endpoint of the path for diagonals. + // We store only the a index, because the b index on any diagonal + // (which we know during the loop below) is aidx-diag. + // endpoint[maxPath] represents the 0 diagonal. + // + // Stated differently: + // endpoint[d] contains the aidx of a furthest reaching path in diagonal d + endpoint := make([]int, 2*maxPath+1) // V + + saved := make([][]int, 0, 8) // Vs + save := func() { + dup := make([]int, len(endpoint)) + copy(dup, endpoint) + saved = append(saved, dup) + } + + var editDistance int // D +dLoop: + for editDistance = 0; editDistance <= maxPath; editDistance++ { + // The 0 diag(onal) represents equality of a and b. Each diagonal to + // the left is numbered one lower, to the right is one higher, from + // -alen to +blen. 
Negative diagonals favor differences from a, + // positive diagonals favor differences from b. The edit distance to a + // diagonal d cannot be shorter than d itself. + // + // The iterations of this loop cover either odds or evens, but not both, + // If odd indices are inputs, even indices are outputs and vice versa. + for diag := -editDistance; diag <= editDistance; diag += 2 { // k + var aidx int // x + switch { + case diag == -editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath-editDistance+1] + 0 + case diag == editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath+editDistance-1] + 1 + case endpoint[maxPath+diag+1] > endpoint[maxPath+diag-1]: + // diagonal d+1 was farther along, so use that + aidx = endpoint[maxPath+diag+1] + 0 + default: + // diagonal d-1 was farther (or the same), so use that + aidx = endpoint[maxPath+diag-1] + 1 + } + // On diagonal d, we can compute bidx from aidx. + bidx := aidx - diag // y + // See how far we can go on this diagonal before we find a difference. + for aidx < alen && bidx < blen && a[aidx] == b[bidx] { + aidx++ + bidx++ + } + // Store the end of the current edit chain. + endpoint[maxPath+diag] = aidx + // If we've found the end of both inputs, we're done! + if aidx >= alen && bidx >= blen { + save() // save the final path + break dLoop + } + } + save() // save the current path + } + if editDistance == 0 { + return nil + } + chunks := make([]Chunk, editDistance+1) + + x, y := alen, blen + for d := editDistance; d > 0; d-- { + endpoint := saved[d] + diag := x - y + insert := diag == -d || (diag != d && endpoint[maxPath+diag-1] < endpoint[maxPath+diag+1]) + + x1 := endpoint[maxPath+diag] + var x0, xM, kk int + if insert { + kk = diag + 1 + x0 = endpoint[maxPath+kk] + xM = x0 + } else { + kk = diag - 1 + x0 = endpoint[maxPath+kk] + xM = x0 + 1 + } + y0 := x0 - kk + + var c Chunk + if insert { + c.Added = b[y0:][:1] + } else { + c.Deleted = a[x0:][:1] + } + if xM < x1 { + c.Equal = a[xM:][:x1-xM] + } + + x, y = x0, y0 + chunks[d] = c + } + if x > 0 { + chunks[0].Equal = a[:x] + } + if chunks[0].empty() { + chunks = chunks[1:] + } + if len(chunks) == 0 { + return nil + } + return chunks +} diff --git a/vendor/github.com/moby/spdystream/connection.go b/vendor/github.com/moby/spdystream/connection.go index d906bb05ce..1394d0ad4c 100644 --- a/vendor/github.com/moby/spdystream/connection.go +++ b/vendor/github.com/moby/spdystream/connection.go @@ -208,9 +208,10 @@ type Connection struct { nextStreamId spdy.StreamId receivedStreamId spdy.StreamId - pingIdLock sync.Mutex - pingId uint32 - pingChans map[uint32]chan error + // pingLock protects pingChans and pingId + pingLock sync.Mutex + pingId uint32 + pingChans map[uint32]chan error shutdownLock sync.Mutex shutdownChan chan error @@ -274,16 +275,20 @@ func NewConnection(conn net.Conn, server bool) (*Connection, error) { // returns the response time func (s *Connection) Ping() (time.Duration, error) { pid := s.pingId - s.pingIdLock.Lock() + s.pingLock.Lock() if s.pingId > 0x7ffffffe { s.pingId = s.pingId - 0x7ffffffe } else { s.pingId = s.pingId + 2 } - s.pingIdLock.Unlock() pingChan := make(chan error) s.pingChans[pid] = pingChan - defer delete(s.pingChans, pid) + s.pingLock.Unlock() + defer func() { + s.pingLock.Lock() + delete(s.pingChans, pid) + s.pingLock.Unlock() + }() frame := &spdy.PingFrame{Id: pid} startTime := time.Now() @@ -612,10 +617,14 @@ func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { } func (s 
*Connection) handlePingFrame(frame *spdy.PingFrame) error { - if s.pingId&0x01 != frame.Id&0x01 { + s.pingLock.Lock() + pingId := s.pingId + pingChan, pingOk := s.pingChans[frame.Id] + s.pingLock.Unlock() + + if pingId&0x01 != frame.Id&0x01 { return s.framer.WriteFrame(frame) } - pingChan, pingOk := s.pingChans[frame.Id] if pingOk { close(pingChan) } @@ -703,7 +712,9 @@ func (s *Connection) shutdown(closeTimeout time.Duration) { var timeout <-chan time.Time if closeTimeout > time.Duration(0) { - timeout = time.After(closeTimeout) + timer := time.NewTimer(closeTimeout) + defer timer.Stop() + timeout = timer.C } streamsClosed := make(chan bool) @@ -730,17 +741,23 @@ func (s *Connection) shutdown(closeTimeout time.Duration) { } if err != nil { - duration := 10 * time.Minute - time.AfterFunc(duration, func() { - select { - case err, ok := <-s.shutdownChan: - if ok { - debugMessage("Unhandled close error after %s: %s", duration, err) - } - default: - } - }) - s.shutdownChan <- err + // default to 1 second + duration := time.Second + // if a closeTimeout was given, use that, clipped to 1s-10m + if closeTimeout > time.Second { + duration = closeTimeout + } + if duration > 10*time.Minute { + duration = 10 * time.Minute + } + timer := time.NewTimer(duration) + defer timer.Stop() + select { + case s.shutdownChan <- err: + // error was handled + case <-timer.C: + debugMessage("Unhandled close error after %s: %s", duration, err) + } } close(s.shutdownChan) } @@ -799,7 +816,9 @@ func (s *Connection) CloseWait() error { func (s *Connection) Wait(waitTimeout time.Duration) error { var timeout <-chan time.Time if waitTimeout > time.Duration(0) { - timeout = time.After(waitTimeout) + timer := time.NewTimer(waitTimeout) + defer timer.Stop() + timeout = timer.C } select { diff --git a/vendor/github.com/moby/spdystream/stream.go b/vendor/github.com/moby/spdystream/stream.go index 404e3c02df..171c1e9e33 100644 --- a/vendor/github.com/moby/spdystream/stream.go +++ b/vendor/github.com/moby/spdystream/stream.go @@ -305,6 +305,8 @@ func (s *Stream) Identifier() uint32 { // IsFinished returns whether the stream has finished // sending data func (s *Stream) IsFinished() bool { + s.finishLock.Lock() + defer s.finishLock.Unlock() return s.finished } diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 0a89497999..76577dc78e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,28 @@ +## 2.19.0 + +### Features + +[Label Sets](https://onsi.github.io/ginkgo/#label-sets) allow for more expressive and flexible label filtering. 
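As a rough illustration of the new label-set filtering (the spec name and label values here are invented, not taken from this changelog), `key:value` labels group into sets that the set operators added to `types/label_filter.go` later in this diff can match against:

```go
package networking_test

import (
	. "github.com/onsi/ginkgo/v2"
)

// Hypothetical spec: the "os:" labels below form a label set keyed on "os".
var _ = Describe("cross-platform networking", Label("os:linux", "os:windows"), func() {
	It("routes traffic between nodes", func() {
		// ...
	})
})
```

A filter such as `ginkgo --label-filter='os: consistsOf {linux, windows}'` selects exactly this spec, while `os: isEmpty` matches specs carrying no `os:` labels at all; both operators appear in the tokenizer changes further down.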
+ +## 2.18.0 + +### Features +- Add --slience-skips and --force-newlines [f010b65] +- fail when no tests were run and --fail-on-empty was set [d80eebe] + +### Fixes +- Fix table entry context edge case [42013d6] + +### Maintenance +- Bump golang.org/x/tools from 0.20.0 to 0.21.0 (#1406) [fcf1fd7] +- Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 (#1399) [8bb14fd] +- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#1407) [04bfad7] + +## 2.17.3 + +### Fixes +`ginkgo watch` now ignores hidden files [bde6e00] + ## 2.17.2 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md index 1da92fe7ee..80de566a52 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md +++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md @@ -6,8 +6,10 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp - Ensure adequate test coverage: - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder). - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. -- Make sure all the tests succeed via `ginkgo -r -p` -- Vet your changes via `go vet ./...` -- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. +- Run `make` or: + - Install ginkgo locally via `go install ./...` + - Make sure all the tests succeed via `ginkgo -r -p` + - Vet your changes via `go vet ./...` +- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle && bundle exec jekyll serve` in the `docs` directory to preview your changes. -Thanks for supporting Ginkgo! \ No newline at end of file +Thanks for supporting Ginkgo! diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile new file mode 100644 index 0000000000..cb099aff95 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/Makefile @@ -0,0 +1,11 @@ +# default task since it's first +.PHONY: all +all: vet test + +.PHONY: test +test: + go run github.com/onsi/ginkgo/v2/ginkgo -r -p + +.PHONY: vet +vet: + go vet ./... 
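The 2.18.0 entries above (`--fail-on-empty`, `--silence-skips`, `--force-newlines`) map onto the `FailOnEmpty`, `SilenceSkips`, and `ForceNewlines` fields added to `types/config.go` further down in this diff. A hedged sketch of setting them programmatically when bootstrapping a suite (package and suite name are invented; the usual gomega fail-handler wiring is omitted):

```go
package mysuite_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/types"
)

func TestMySuite(t *testing.T) {
	suiteCfg := types.NewDefaultSuiteConfig()
	suiteCfg.FailOnEmpty = true // fail the suite if no specs actually run

	reporterCfg := types.NewDefaultReporterConfig()
	reporterCfg.SilenceSkips = true  // do not print skipped specs
	reporterCfg.ForceNewlines = true // always emit a newline after each spec

	// RunSpecs accepts SuiteConfig/ReporterConfig overrides alongside the usual arguments.
	RunSpecs(t, "Invented Suite", suiteCfg, reporterCfg)
}
```

On the command line the same behaviour comes from the three flags registered in `SuiteConfigFlags` and `ReporterConfigFlags` below.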
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go index 17d052bdc3..0e6ae1f290 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strings" "time" ) @@ -79,6 +80,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } + if isHiddenFile(info) { + continue + } + if goTestRegExp.MatchString(info.Name()) { testHash += p.hashForFileInfo(info) if info.ModTime().After(testModifiedTime) { @@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti return } +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") +} + func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index a994ee3d67..a3c9e6bf18 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -489,10 +489,15 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx])) } - if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending { + if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set") suite.report.SuiteSucceeded = false } + + if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set") + suite.report.SuiteSucceeded = false + } } if ranBeforeSuite { diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 980973370e..480730486a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -202,6 +202,11 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel + //should we completely omit this spec? 
+ if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips { + return + } + header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { header = fmt.Sprintf("[%s]", report.LeafNodeType) @@ -278,9 +283,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { } } - // If we have no content to show, jsut emit the header and return + // If we have no content to show, just emit the header and return if !reportHasContent { r.emit(r.f(highlightColor + header + "{{/}}")) + if r.conf.ForceNewlines { + r.emit("\n") + } return } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 2a3215b513..562e0f62ba 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -177,6 +177,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index a3aef821bf..c7de7a8be0 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -269,11 +269,15 @@ func generateTable(description string, isSubtree bool, args ...interface{}) { internalNodeArgs = append(internalNodeArgs, entry.decorations...) hasContext := false - if internalBodyType.NumIn() > 0. { + if internalBodyType.NumIn() > 0 { if internalBodyType.In(0).Implements(specContextType) { hasContext = true - } else if internalBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + } else if internalBodyType.In(0).Implements(contextType) { hasContext = true + if len(entry.parameters) > 0 && reflect.TypeOf(entry.parameters[0]) != nil && reflect.TypeOf(entry.parameters[0]).Implements(contextType) { + // we allow you to pass in a non-nil context + hasContext = false + } } } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index cef273ee1f..66463cf5ea 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -25,6 +25,7 @@ type SuiteConfig struct { SkipFiles []string LabelFilter string FailOnPending bool + FailOnEmpty bool FailFast bool FlakeAttempts int MustPassRepeatedly int @@ -90,6 +91,8 @@ type ReporterConfig struct { FullTrace bool ShowNodeEvents bool GithubOutput bool + SilenceSkips bool + ForceNewlines bool JSONReport string JUnitReport string @@ -275,6 +278,8 @@ var SuiteConfigFlags = GinkgoFlags{ Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", Usage: "Make up to this many attempts to run each spec. 
If any of the attempts succeed, the suite will not be failed."}, + {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure", + Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."}, {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, @@ -334,6 +339,10 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", Usage: "If set, default reporter prints easier to manage output in Github Actions."}, + {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output", + Usage: "If set, default reporter will not print out skipped tests."}, + {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output", + Usage: "If set, default reporter will ensure a newline appears after each test."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index b0d3b651e7..7fdc8aa23f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter { return func(labels []string) bool { return a(labels) || b(labels) } } +func labelSetFor(key string, labels []string) map[string]bool { + key = strings.ToLower(strings.TrimSpace(key)) + out := map[string]bool{} + for _, label := range labels { + components := strings.SplitN(label, ":", 2) + if len(components) < 2 { + continue + } + if key == strings.ToLower(strings.TrimSpace(components[0])) { + out[strings.ToLower(strings.TrimSpace(components[1]))] = true + } + } + + return out +} + +func isEmptyLabelSetAction(key string) LabelFilter { + return func(labels []string) bool { + return len(labelSetFor(key, labels)) == 0 + } +} + +func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if set[value] { + return true + } + } + return false + } +} + +func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + if len(set) != len(expectedValues) { + return false + } + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter { + expectedSet := map[string]bool{} + for _, value := range expectedValues { + expectedSet[value] = true + } + return func(labels []string) bool { + set := labelSetFor(key, labels) + for value := range set { + if !expectedSet[value] { + return false + } + } + return true + } +} + type lfToken uint const ( @@ -58,6 +135,9 @@ const ( lfTokenOr lfTokenRegexp 
lfTokenLabel + lfTokenSetKey + lfTokenSetOperation + lfTokenSetArgument lfTokenEOF ) @@ -71,6 +151,8 @@ func (l lfToken) Precedence() int { return 2 case lfTokenNot: return 3 + case lfTokenSetOperation: + return 4 } return -1 } @@ -93,6 +175,12 @@ func (l lfToken) String() string { return "/regexp/" case lfTokenLabel: return "label" + case lfTokenSetKey: + return "set_key" + case lfTokenSetOperation: + return "set_operation" + case lfTokenSetArgument: + return "set_argument" case lfTokenEOF: return "EOF" } @@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) } return matchLabelRegexAction(re), nil + case lfTokenSetOperation: + tokenSetOperation := strings.ToLower(tn.value) + if tokenSetOperation == "isempty" { + return isEmptyLabelSetAction(tn.leftNode.value), nil + } + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value)) + } + + rawValues := strings.Split(tn.rightNode.value, ",") + values := make([]string, len(rawValues)) + for i := range rawValues { + values[i] = strings.ToLower(strings.TrimSpace(rawValues[i])) + if strings.ContainsAny(values[i], "&|!,()/") { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i])) + } else if values[i] == "" { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.") + } + } + switch tokenSetOperation { + case "containsany": + return containsAnyLabelSetAction(tn.leftNode.value, values), nil + case "containsall": + return containsAllLabelSetAction(tn.leftNode.value, values), nil + case "consistsof": + return consistsOfLabelSetAction(tn.leftNode.value, values), nil + case "issubsetof": + return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil + } } if tn.rightNode == nil { @@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string { return out } +var validSetOperations = map[string]string{ + "containsany": "containsAny", + "containsall": "containsAll", + "consistsof": "consistsOf", + "issubsetof": "isSubsetOf", + "isempty": "isEmpty", +} + func tokenize(input string) func() (*treeNode, error) { + lastToken := lfTokenInvalid + lastValue := "" runes, i := []rune(input), 0 peekIs := func(r rune) bool { @@ -233,6 +360,53 @@ func tokenize(input string) func() (*treeNode, error) { } node := &treeNode{location: i} + defer func() { + lastToken = node.token + lastValue = node.value + }() + + if lastToken == lfTokenSetKey { + //we should get a valid set operation next + value, n := consumeUntil(" )") + if validSetOperations[strings.ToLower(value)] == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value)) + } + i += n + node.token, node.value = lfTokenSetOperation, value + return node, nil + } + if lastToken == lfTokenSetOperation { + //we should get an argument next, if we aren't isempty + var arg = "" + origI := i + if runes[i] == '{' { + i += 1 + value, n := consumeUntil("}") + if i+n >= len(runes) { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?") + } + i += n + 1 + arg = value + } else { + value, n := consumeUntil("&|!,()/") + 
i += n + arg = strings.TrimSpace(value) + } + if strings.ToLower(lastValue) == "isempty" && arg != "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg)) + } + if arg == "" && strings.ToLower(lastValue) != "isempty" { + if i < len(runes) && runes[i] == '/' { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.") + } else { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue)) + } + } + // note that we sent an empty SetArgument token if we are isempty + node.token, node.value = lfTokenSetArgument, arg + return node, nil + } + switch runes[i] { case '&': if !peekIs('&') { @@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) { i += n + 1 node.token, node.value = lfTokenRegexp, value default: - value, n := consumeUntil("&|!,()/") + value, n := consumeUntil("&|!,()/:") i += n + value = strings.TrimSpace(value) + + //are we the beginning of a set operation? + if i < len(runes) && runes[i] == ':' { + if peekIs(' ') { + if value == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.") + } + i += 1 + //we are the beginning of a set operation + node.token, node.value = lfTokenSetKey, value + return node, nil + } + additionalValue, n := consumeUntil("&|!,()/") + additionalValue = strings.TrimSpace(additionalValue) + if additionalValue == ":" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.") + } + i += n + value += additionalValue + } + + valueToCheckForSetOperation := strings.ToLower(value) + for setOperation := range validSetOperations { + idx := strings.Index(valueToCheckForSetOperation, " "+setOperation) + if idx > 0 { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation])) + } + } + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) } return node, nil @@ -307,7 +511,7 @@ LOOP: switch node.token { case lfTokenEOF: break LOOP - case lfTokenLabel, lfTokenRegexp: + case lfTokenLabel, lfTokenRegexp, lfTokenSetKey: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. 
You need an operator between them.") } @@ -326,6 +530,18 @@ LOOP: node.setLeftNode(nodeToStealFrom.rightNode) nodeToStealFrom.setRightNode(node) current = node + case lfTokenSetOperation: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value)) + } + node.setLeftNode(current.rightNode) + current.setRightNode(node) + current = node + case lfTokenSetArgument: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token)) + } + current.setRightNode(node) case lfTokenCloseGroup: firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() if firstUnmatchedOpenNode == nil { @@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { if strings.ContainsAny(out, "&|!,()/") { return "", GinkgoErrors.InvalidLabel(label, cl) } + if out[0] == ':' { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + if strings.Contains(out, ":") { + components := strings.SplitN(out, ":", 2) + if len(components) < 2 || components[1] == "" { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + } return out, nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 5dd0140cd3..acab03492a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.17.2" +const VERSION = "2.19.0" diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30ee..b9cc55abbb 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE new file mode 100644 index 0000000000..65d761bc9f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go new file mode 100644 index 0000000000..8547c8dfd1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go @@ -0,0 +1,145 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package header provides functions for parsing HTTP headers. +package header + +import ( + "net/http" + "strings" +) + +// Octet types from RFC 2616. +var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// AcceptSpec describes an Accept* header. +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept parses Accept* headers. 
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go new file mode 100644 index 0000000000..2e45780b74 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index bcfa4fa10e..cc4ef1077e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -37,6 +37,9 @@ var ( // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. // e.g. 
go_sched_goroutines_goroutines MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} + // MetricsDebug allows only debug metrics to be collected from Go runtime. + // e.g. go_godebug_non_default_behavior_gocachetest_events_total + MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)} ) // WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: @@ -44,7 +47,6 @@ var ( // go_memstats_alloc_bytes // go_memstats_alloc_bytes_total // go_memstats_sys_bytes -// go_memstats_lookups_total // go_memstats_mallocs_total // go_memstats_frees_total // go_memstats_heap_alloc_bytes diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5e0..520cbd7d41 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. 
Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. 
Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. 
Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64f4..5117464172 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] @@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description.Description, + help, nil, nil, ), @@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }, ) } else { @@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }) } metricSet = append(metricSet, m) @@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } } +func attachOriginalName(desc, origName string) string { + return fmt.Sprintf("%s Sourced from %s", desc, origName) +} + // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) @@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. 
- panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index b5c8bcb395..519db348a7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -440,7 +440,7 @@ type HistogramOpts struct { // constant (or any negative float value). NativeHistogramZeroThreshold float64 - // The remaining fields define a strategy to limit the number of + // The next three fields define a strategy to limit the number of // populated sparse buckets. If NativeHistogramMaxBucketNumber is left // at zero, the number of buckets is not limited. (Note that this might // lead to unbounded memory consumption if the values observed by the @@ -473,6 +473,22 @@ type HistogramOpts struct { NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. (Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.afterFunc == nil { opts.afterFunc = time.AfterFunc } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -725,7 +743,8 @@ type histogram struct { // resetScheduled is protected by mtx. It is true if a reset is // scheduled for a later time (when nativeHistogramMinResetDuration has // passed). - resetScheduled bool + resetScheduled bool + nativeExemplars nativeExemplars // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. 
func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) @@ -821,6 +843,13 @@ func (h *histogram) Write(out *dto.Metric) error { Length: proto.Uint32(0), }} } + + if h.nativeExemplars.isEnabled() { + h.nativeExemplars.Lock() + his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) + h.nativeExemplars.Unlock() + } + } addAndResetCounts(hotCounts, coldCounts) return nil @@ -1091,8 +1120,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) { deleteSyncMap(&counts.nativeHistogramBucketsPositive) } -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. +// updateExemplar replaces the exemplar for the provided classic bucket. +// With empty labels, it's a no-op. It panics if any of the labels is invalid. +// If histogram is native, the exemplar will be cached into nativeExemplars, +// which has a limit, and will remove one exemplar when limit is reached. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return @@ -1102,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { panic(err) } h.exemplars[bucket].Store(e) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) + if doSparse { + h.nativeExemplars.addExemplar(e) + } } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -1336,6 +1371,48 @@ func MustNewConstHistogram( return m } +// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. +func NewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where +// NewConstHistogramWithCreatedTimestamp would have returned an error. +func MustNewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} + type buckSort []*dto.Bucket func (s buckSort) Len() int { @@ -1575,3 +1652,186 @@ func addAndResetCounts(hot, cold *histogramCounts) { atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) } + +type nativeExemplars struct { + sync.Mutex + + // Time-to-live for exemplars, it is set to -1 if exemplars are disabled, that is NativeHistogramMaxExemplars is below 0. + // The ttl is used on insertion to remove an exemplar that is older than ttl, if present. 
+ ttl time.Duration + + exemplars []*dto.Exemplar +} + +func (n *nativeExemplars) isEnabled() bool { + return n.ttl != -1 +} + +func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { + if ttl == 0 { + ttl = 5 * time.Minute + } + + if maxCount == 0 { + maxCount = 10 + } + + if maxCount < 0 { + maxCount = 0 + ttl = -1 + } + + return nativeExemplars{ + ttl: ttl, + exemplars: make([]*dto.Exemplar, 0, maxCount), + } +} + +func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { + if !n.isEnabled() { + return + } + + n.Lock() + defer n.Unlock() + + // When the number of exemplars has not yet exceeded or + // is equal to cap(n.exemplars), then + // insert the new exemplar directly. + if len(n.exemplars) < cap(n.exemplars) { + var nIdx int + for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { + if *e.Value < *n.exemplars[nIdx].Value { + break + } + } + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...) + return + } + + if len(n.exemplars) == 1 { + // When the number of exemplars is 1, then + // replace the existing exemplar with the new exemplar. + n.exemplars[0] = e + return + } + // From this point on, the number of exemplars is greater than 1. + + // When the number of exemplars exceeds the limit, remove one exemplar. + var ( + ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop. + otIdx = -1 // Index of the exemplar with the oldest timestamp. + + md = -1.0 // Logarithm of the delta of the closest pair of exemplars. + + // The insertion point of the new exemplar in the exemplars slice after insertion. + // This is calculated purely based on the order of the exemplars by value. + // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end. + nIdx = -1 + + // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar. + // The aim is to keep a good spread of exemplars by value and not let them bunch up too much. + // It is calculated in 3 steps: + // 1. First we set rIdx to the index of the older exemplar within the closest pair by value. + // That is the following will be true (on log scale): + // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have + // the closest values to each other from all pairs. + // For example, suppose the values are distributed like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // Or like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // 2. If there is an exemplar that expired, then we simple reset rIdx to that index. + // 3. We check if by inserting the new exemplar we would create a closer pair at + // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to + // keep the spread of exemplars by value; otherwise we keep rIdx as it is. + rIdx = -1 + cLog float64 // Logarithm of the current exemplar. + pLog float64 // Logarithm of the previous exemplar. + ) + + for i, exemplar := range n.exemplars { + // Find the exemplar with the oldest timestamp. + if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) { + ot = exemplar.Timestamp.AsTime() + otIdx = i + } + + // Find the index at which to insert new the exemplar. + if nIdx == -1 && *e.Value <= *exemplar.Value { + nIdx = i + } + + // Find the two closest exemplars and pick the one the with older timestamp. 
+ pLog = cLog + cLog = math.Log(exemplar.GetValue()) + if i == 0 { + continue + } + diff := math.Abs(cLog - pLog) + if md == -1 || diff < md { + // The closest exemplar pair is at index: i-1, i. + // Choose the exemplar with the older timestamp for replacement. + md = diff + if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { + rIdx = i + } else { + rIdx = i - 1 + } + } + + } + + // If all existing exemplar are smaller than new exemplar, + // then the exemplar should be inserted at the end. + if nIdx == -1 { + nIdx = len(n.exemplars) + } + // Here, we have the following relationships: + // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0) + // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars)) + + if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { + // If the oldest exemplar has expired, then replace it with the new exemplar. + rIdx = otIdx + } else { + // In the previous for loop, when calculating the closest pair of exemplars, + // we did not take into account the newly inserted exemplar. + // So we need to calculate with the newly inserted exemplar again. + elog := math.Log(e.GetValue()) + if nIdx > 0 { + diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) + if diff < md { + // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-n-----------x----------------x----x-----| + // nIdx-1--^ ^--new exemplar value + // Do not make the spread worse, replace nIdx-1 and not rIdx. + md = diff + rIdx = nIdx - 1 + } + } + if nIdx < len(n.exemplars) { + diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) + if diff < md { + // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-----------n-x----------------x----x-----| + // new exemplar value--^ ^--nIdx + // Do not make the spread worse, replace nIdx-1 and not rIdx. + rIdx = nIdx + } + } + } + + // Adjust the slice according to rIdx and nIdx. + switch { + case rIdx == nIdx: + n.exemplars[nIdx] = e + case rIdx < nIdx: + n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...) + case rIdx > nIdx: + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...) 
+ } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 723b45d644..a4fa6eabd7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -30,3 +30,5 @@ type GoCollectorOptions struct { RuntimeMetricSumForHist map[string]string RuntimeMetricRules []GoCollectorRule } + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f018e57237..9d9b81ab44 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { ) for i, e := range exemplars { ts := e.Timestamp - if ts == (time.Time{}) { + if ts.IsZero() { ts = now } exs[i], err = newExemplar(e.Value, ts, e.Labels) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 8548dd18ed..62a4e7ad9a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -22,14 +22,15 @@ import ( ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { @@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) { ch <- c.maxVsize ch <- c.rss ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes } // Collect returns the current state of all metrics of the collector. 
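
The HistogramOpts additions above introduce two knobs for native-histogram exemplars (NativeHistogramMaxExemplars and NativeHistogramExemplarTTL). A minimal usage sketch, assuming an illustrative metric name and trace label (neither is part of this change):

    package main

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        // Illustrative histogram; only the NativeHistogram* fields relate to the change above.
        reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
            Name:                        "demo_request_duration_seconds",
            Help:                        "Illustrative request latency.",
            NativeHistogramBucketFactor: 1.1,             // enables the native (sparse) histogram
            NativeHistogramMaxExemplars: 10,              // 0 also defaults to 10; a negative value disables native exemplars
            NativeHistogramExemplarTTL:  5 * time.Minute, // 0 defaults to 5m; negative always evicts the oldest exemplar
        })
        prometheus.MustRegister(reqDur)

        // Exemplars are attached via ObserveWithExemplar. As the comment above notes,
        // avoid calling this on a very hot path when native exemplars are enabled.
        reqDur.(prometheus.ExemplarObserver).ObserveWithExemplar(0.042, prometheus.Labels{"trace_id": "abc123"})
    }
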
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 8c1136ceea..14d56d2d06 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } else { c.reportError(ch, nil, err) } + + if netstat, err := p.Netstat(); err == nil { + var inOctets, outOctets float64 + if netstat.IpExt.InOctets != nil { + inOctets = *netstat.IpExt.InOctets + } + if netstat.IpExt.OutOctets != nil { + outOctets = *netstat.IpExt.OutOctets + } + ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) + ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) + } else { + c.reportError(ch, nil, err) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9819917b83..315eab5f17 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } +// Unwrap lets http.ResponseController get the underlying http.ResponseWriter, +// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface. +func (r *responseWriterDelegator) Unwrap() http.ResponseWriter { + return r.ResponseWriter +} + type ( closeNotifierDelegator struct{ *responseWriterDelegator } flusherDelegator struct{ *responseWriterDelegator } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 09b8d2fbea..e598e66e68 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -38,12 +38,13 @@ import ( "io" "net/http" "strconv" - "strings" "sync" "time" + "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" ) @@ -54,6 +55,18 @@ const ( processStartTimeHeader = "Process-Start-Time-Unix" ) +// Compression represents the content encodings handlers support for the HTTP +// responses. +type Compression string + +const ( + Identity Compression = "identity" + Gzip Compression = "gzip" + Zstd Compression = "zstd" +) + +var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} + var gzipPool = sync.Pool{ New: func() interface{} { return gzip.NewWriter(nil) @@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } } + // Select compression formats to offer based on default or user choice. 
+ var compressions []string + if !opts.DisableCompression { + offers := defaultCompressionFormats + if len(opts.OfferedCompressions) > 0 { + offers = opts.OfferedCompressions + } + for _, comp := range offers { + compressions = append(compressions, string(comp)) + } + } + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if !opts.ProcessStartTime.IsZero() { rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) @@ -165,21 +190,23 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } else { contentType = expfmt.Negotiate(req.Header) } - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) + rsp.Header().Set(contentTypeHeader, string(contentType)) - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) + w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions) + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error getting writer", err) + } + w = io.Writer(rsp) + encodingHeader = string(Identity) + } - gz.Reset(w) - defer gz.Close() + defer closeWriter() - w = gz + // Set Content-Encoding only when data is compressed + if encodingHeader != string(Identity) { + rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) // handleError handles the error according to opts.ErrorHandling @@ -343,9 +370,19 @@ type HandlerOpts struct { // no effect on the HTTP status code because ErrorHandling is set to // ContinueOnError. Registry prometheus.Registerer - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. + // DisableCompression disables the response encoding (compression) and + // encoding negotiation. If true, the handler will + // never compress the response, even if requested + // by the client and the OfferedCompressions field is set. DisableCompression bool + // OfferedCompressions is a set of encodings (compressions) handler will + // try to offer when negotiating with the client. This defaults to identity, gzip + // and zstd. + // NOTE: If handler can't agree with the client on the encodings or + // unsupported or empty encodings are set in OfferedCompressions, + // handler always fallbacks to no compression (identity), for + // compatibility reasons. In such cases ErrorLog will be used if set. + OfferedCompressions []Compression // The number of concurrent HTTP requests is limited to // MaxRequestsInFlight. Additional requests are responded to with 503 // Service Unavailable and a suitable message in the body. If @@ -381,19 +418,6 @@ type HandlerOpts struct { ProcessStartTime time.Time } -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - // httpError removes any content-encoding header and then calls http.Error with // the provided error and http.StatusInternalServerError. Error contents is // supposed to be uncompressed plain text. 
Same as with a plain http.Error, this @@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) { http.StatusInternalServerError, ) } + +// negotiateEncodingWriter reads the Accept-Encoding header from a request and +// selects the right compression based on an allow-list of supported +// compressions. It returns a writer implementing the compression and an the +// correct value that the caller can set in the response header. +func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) { + if len(compressions) == 0 { + return rw, string(Identity), func() {}, nil + } + + // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented. + selected := httputil.NegotiateContentEncoding(r, compressions) + + switch selected { + case "zstd": + // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. + z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) + if err != nil { + return nil, "", func() {}, err + } + + z.Reset(rw) + return z, selected, func() { _ = z.Close() }, nil + case "gzip": + gz := gzipPool.Get().(*gzip.Writer) + gz.Reset(rw) + return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil + case "identity": + // This means the content is not compressed. + return rw, selected, func() {}, nil + default: + // The content encoding was not implemented yet. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 5e2ced25a0..c6fd2f58b7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error { if dimHash != desc.dimHash { return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash + continue + } + + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } + continue } + newDimHashesByName[desc.fqName] = desc.dimHash } // A Collector yielding no Desc at all is considered unchecked. 
if len(newDescIDs) == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1462704446..1ab0e47965 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -783,3 +783,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go index dd29cccc30..ea46f38ecf 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go @@ -68,7 +68,7 @@ func (l *Linter) Lint() ([]Problem, error) { var problems []Problem if l.r != nil { - d := expfmt.NewDecoder(l.r, expfmt.FmtText) + d := expfmt.NewDecoder(l.r, expfmt.NewFormat(expfmt.TypeTextPlain)) mf := &dto.MetricFamily{} for { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go index f52ad9eab6..e1441598da 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -30,4 +30,5 @@ var defaultValidations = []Validation{ validations.LintReservedChars, validations.LintCamelCase, validations.LintUnitAbbreviations, + validations.LintDuplicateMetric, } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go new file mode 100644 index 0000000000..fdc1e62394 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "fmt" + "reflect" + + dto "github.com/prometheus/client_model/go" +) + +// LintDuplicateMetric detects duplicate metric. +func LintDuplicateMetric(mf *dto.MetricFamily) []error { + var problems []error + + for i, m := range mf.Metric { + for _, k := range mf.Metric[i+1:] { + if reflect.DeepEqual(m.Label, k.Label) { + problems = append(problems, fmt.Errorf("metric not unique")) + break + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go index bc8dbd1e16..de52cfee44 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go @@ -44,21 +44,21 @@ func LintMetricUnits(mf *dto.MetricFamily) []error { return problems } -// LintMetricTypeInName detects when metric types are included in the metric name. +// LintMetricTypeInName detects when the metric type is included in the metric name. func LintMetricTypeInName(mf *dto.MetricFamily) []error { + if mf.GetType() == dto.MetricType_UNTYPED { + return nil + } + var problems []error - n := strings.ToLower(mf.GetName()) - for i, t := range dto.MetricType_name { - if i == int32(dto.MetricType_UNTYPED) { - continue - } + n := strings.ToLower(mf.GetName()) + typename := strings.ToLower(mf.GetType().String()) - typename := strings.ToLower(t) - if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { - problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) - } + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) } + return problems } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 269f56435b..e0ac346665 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -42,9 +42,8 @@ import ( "fmt" "io" "net/http" - "reflect" - "github.com/davecgh/go-spew/spew" + "github.com/kylelemons/godebug/diff" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "google.golang.org/protobuf/proto" @@ -184,9 +183,8 @@ func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) err return compareMetricFamilies(scraped, wanted, metricNames...) } -// CollectAndCompare registers the provided Collector with a newly created -// pedantic Registry. It then calls GatherAndCompare with that Registry and with -// the provided metricNames. 
+// CollectAndCompare collects the metrics identified by `metricNames` and compares them in the Prometheus text +// exposition format to the data read from expected. func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { @@ -222,6 +220,31 @@ func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected return compareMetricFamilies(got, wanted, metricNames...) } +// CollectAndFormat collects the metrics identified by `metricNames` and returns them in the given format. +func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNames ...string) ([]byte, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %w", err) + } + + gotFiltered, err := reg.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %w", err) + } + + gotFiltered = filterMetrics(gotFiltered, metricNames) + + var gotFormatted bytes.Buffer + enc := expfmt.NewEncoder(&gotFormatted, expfmt.NewFormat(format)) + for _, mf := range gotFiltered { + if err := enc.Encode(mf); err != nil { + return nil, fmt.Errorf("encoding gathered metrics failed: %w", err) + } + } + + return gotFormatted.Bytes(), nil +} + // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { @@ -254,6 +277,15 @@ func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...str if metricNames != nil { got = filterMetrics(got, metricNames) expected = filterMetrics(expected, metricNames) + if len(metricNames) > len(got) { + var missingMetricNames []string + for _, name := range metricNames { + if ok := hasMetricByName(got, name); !ok { + missingMetricNames = append(missingMetricNames, name) + } + } + return fmt.Errorf("expected metric name(s) not found: %v", missingMetricNames) + } } return compare(got, expected) @@ -265,85 +297,24 @@ func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...str // result. func compare(got, want []*dto.MetricFamily) error { var gotBuf, wantBuf bytes.Buffer - enc := expfmt.NewEncoder(&gotBuf, expfmt.FmtText) + enc := expfmt.NewEncoder(&gotBuf, expfmt.NewFormat(expfmt.TypeTextPlain)) for _, mf := range got { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding gathered metrics failed: %w", err) } } - enc = expfmt.NewEncoder(&wantBuf, expfmt.FmtText) + enc = expfmt.NewEncoder(&wantBuf, expfmt.NewFormat(expfmt.TypeTextPlain)) for _, mf := range want { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding expected metrics failed: %w", err) } } - if diffErr := diff(wantBuf, gotBuf); diffErr != "" { + if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { return fmt.Errorf(diffErr) } return nil } -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. 
-func diff(expected, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ - A: internal.SplitLines(e), - B: internal.SplitLines(a), - FromFile: "metric output does not match expectation; want", - FromDate: "", - ToFile: "got:", - ToDate: "", - Context: 1, - }) - - if diff == "" { - return "" - } - - return "\n\nDiff:\n" + diff -} - -// typeAndKind returns the type and kind of the given interface{} -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { var filtered []*dto.MetricFamily for _, m := range metrics { @@ -356,3 +327,12 @@ func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFam } return filtered } + +func hasMetricByName(metrics []*dto.MetricFamily, name string) bool { + for _, mf := range metrics { + if mf.GetName() == name { + return true + } + } + return false +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 955cfd59f8..2c808eece0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index a909b171c8..1448439b7f 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -73,16 +73,16 @@ func ResponseFormat(h http.Header) Format { // NewDecoder returns a new decoder based on the given input format. // If the input format does not imply otherwise, a text format decoder is returned. func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: - return &protoDecoder{r: r} + switch format.FormatType() { + case TypeProtoDelim: + return &protoDecoder{r: bufio.NewReader(r)} } return &textDecoder{r: r} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { - r io.Reader + r protodelim.Reader } // Decode implements the Decoder interface. 
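
The promhttp changes above add negotiated response compression via HandlerOpts.OfferedCompressions. A minimal wiring sketch, assuming an illustrative registry and listen address:

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()

        // Offer only gzip and identity; zstd is deliberately left out here.
        // If the client requests an encoding outside this list, the handler
        // falls back to identity, as described in the HandlerOpts comment above.
        handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
            OfferedCompressions: []promhttp.Compression{promhttp.Gzip, promhttp.Identity},
        })

        http.Handle("/metrics", handler)
        log.Fatal(http.ListenAndServe(":8080", nil)) // address is an illustrative assumption
    }
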
@@ -90,7 +90,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { opts := protodelim.UnmarshalOptions{ MaxSize: -1, } - if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil { + if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } if !model.IsValidMetricName(model.LabelValue(v.GetName())) { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 02b7a5e812..cf0c150c2e 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -21,7 +21,9 @@ import ( "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "github.com/prometheus/common/model" + + "github.com/munnerz/goautoneg" dto "github.com/prometheus/client_model/go" ) @@ -61,23 +63,32 @@ func (ec encoderCloser) Close() error { // as the support is still experimental. To include the option to negotiate // FmtOpenMetrics, use NegotiateOpenMetrics. func Negotiate(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. + } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return FmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return FmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return FmtText + escapingScheme } } - return FmtText + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -85,29 +96,40 @@ func Negotiate(h http.Header) Format { // temporary and will disappear once FmtOpenMetrics is fully supported and as // such may be negotiated by the normal Negotiate function. func NegotiateIncludingOpenMetrics(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. 
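// Illustrative sketch (not part of the vendored patch): content negotiation now
// honors an "escaping" parameter in the Accept header and appends the selected
// scheme to the returned Format. The header value is an assumption for
// illustration; without the parameter the global NameEscapingScheme is used.
package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

func main() {
	h := http.Header{}
	h.Set("Accept", "text/plain;version=0.0.4;escaping=allow-utf-8")

	format := expfmt.Negotiate(h)
	fmt.Println(format)
	// Expected to be the plain-text content type with the escaping term
	// appended, roughly:
	//   text/plain; version=0.0.4; charset=utf-8; escaping=allow-utf-8
}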
+ } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return FmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return FmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { - if ver == OpenMetricsVersion_1_0_0 { - return FmtOpenMetrics_1_0_0 + switch ver { + case OpenMetricsVersion_1_0_0: + return FmtOpenMetrics_1_0_0 + escapingScheme + default: + return FmtOpenMetrics_0_0_1 + escapingScheme } - return FmtOpenMetrics_0_0_1 } } - return FmtText + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All @@ -116,9 +138,19 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { // for FmtOpenMetrics, but a future (breaking) release will add the Close method // to the Encoder interface directly. The current version of the Encoder // interface is kept for backwards compatibility. -func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: +// In cases where the Format does not allow for UTF-8 names, the global +// NameEscapingScheme will be applied. +// +// NewEncoder can be called with additional options to customize the OpenMetrics text output. +// For example: +// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines()) +// +// Extra options are ignored for all other formats. +func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { + escapingScheme := format.ToEscapingScheme() + + switch format.FormatType() { + case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { _, err := protodelim.MarshalTo(w, v) @@ -126,34 +158,34 @@ func NewEncoder(w io.Writer, format Format) Encoder { }, close: func() error { return nil }, } - case FmtProtoCompact: + case TypeProtoCompact: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) + _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String()) return err }, close: func() error { return nil }, } - case FmtProtoText: + case TypeProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, prototext.Format(v)) + _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme))) return err }, close: func() error { return nil }, } - case FmtText: + case TypeTextPlain: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) + _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, } - case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0: + case TypeOpenMetrics: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, v) + _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...) 
return err }, close: func() error { diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index c4cb20f0d3..d942af8edd 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -14,26 +14,48 @@ // Package expfmt contains tools for reading and writing Prometheus metrics. package expfmt +import ( + "fmt" + "strings" + + "github.com/prometheus/common/model" +) + // Format specifies the HTTP content type of the different wire protocols. type Format string -// Constants to assemble the Content-Type values for the different wire protocols. +// Constants to assemble the Content-Type values for the different wire +// protocols. The Content-Type strings here are all for the legacy exposition +// formats, where valid characters for metric names and label names are limited. +// Support for arbitrary UTF-8 characters in those names is already partially +// implemented in this module (see model.ValidationScheme), but to actually use +// it on the wire, new content-type strings will have to be agreed upon and +// added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) @@ -41,3 +63,145 @@ const ( hdrContentType = "Content-Type" hdrAccept = "Accept" ) + +// FormatType is a Go enum representing the overall category for the given +// Format. As the number of Format permutations increases, doing basic string +// comparisons are not feasible, so this enum captures the most useful +// high-level attribute of the Format string. 
+type FormatType int + +const ( + TypeUnknown FormatType = iota + TypeProtoCompact + TypeProtoDelim + TypeProtoText + TypeTextPlain + TypeOpenMetrics +) + +// NewFormat generates a new Format from the type provided. Mostly used for +// tests, most Formats should be generated as part of content negotiation in +// encode.go. If a type has more than one version, the latest version will be +// returned. +func NewFormat(t FormatType) Format { + switch t { + case TypeProtoCompact: + return FmtProtoCompact + case TypeProtoDelim: + return FmtProtoDelim + case TypeProtoText: + return FmtProtoText + case TypeTextPlain: + return FmtText + case TypeOpenMetrics: + return FmtOpenMetrics_1_0_0 + default: + return FmtUnknown + } +} + +// NewOpenMetricsFormat generates a new OpenMetrics format matching the +// specified version number. +func NewOpenMetricsFormat(version string) (Format, error) { + if version == OpenMetricsVersion_0_0_1 { + return FmtOpenMetrics_0_0_1, nil + } + if version == OpenMetricsVersion_1_0_0 { + return FmtOpenMetrics_1_0_0, nil + } + return FmtUnknown, fmt.Errorf("unknown open metrics version string") +} + +// WithEscapingScheme returns a copy of Format with the specified escaping +// scheme appended to the end. If an escaping scheme already exists it is +// removed. +func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { + var terms []string + for _, p := range strings.Split(string(f), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + trimmed := strings.TrimSpace(p) + if len(trimmed) > 0 { + terms = append(terms, trimmed) + } + continue + } + key := strings.TrimSpace(toks[0]) + if key != model.EscapingKey { + terms = append(terms, strings.TrimSpace(p)) + } + } + terms = append(terms, model.EscapingKey+"="+s.String()) + return Format(strings.Join(terms, "; ")) +} + +// FormatType deduces an overall FormatType for the given format. +func (f Format) FormatType() FormatType { + toks := strings.Split(string(f), ";") + params := make(map[string]string) + for i, t := range toks { + if i == 0 { + continue + } + args := strings.Split(t, "=") + if len(args) != 2 { + continue + } + params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1]) + } + + switch strings.TrimSpace(toks[0]) { + case ProtoType: + if params["proto"] != ProtoProtocol { + return TypeUnknown + } + switch params["encoding"] { + case "delimited": + return TypeProtoDelim + case "text": + return TypeProtoText + case "compact-text": + return TypeProtoCompact + default: + return TypeUnknown + } + case OpenMetricsType: + if params["charset"] != "utf-8" { + return TypeUnknown + } + return TypeOpenMetrics + case "text/plain": + v, ok := params["version"] + if !ok { + return TypeTextPlain + } + if v == TextVersion { + return TypeTextPlain + } + return TypeUnknown + default: + return TypeUnknown + } +} + +// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the +// Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid +// "escaping" term exists, that will be used. Otherwise, the global default will +// be returned. 
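// Illustrative sketch (not part of the vendored patch): instead of comparing
// Format strings against the deprecated Fmt* constants, Formats are now built
// and inspected through NewFormat, FormatType and WithEscapingScheme.
package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	f := expfmt.NewFormat(expfmt.TypeOpenMetrics)
	fmt.Println(f.FormatType() == expfmt.TypeOpenMetrics) // true

	// Attach (or replace) an escaping term and read it back.
	f = f.WithEscapingScheme(model.NoEscaping)
	fmt.Println(f.ToEscapingScheme() == model.NoEscaping) // true
	fmt.Println(f)
	// Roughly: application/openmetrics-text; version=1.0.0; charset=utf-8; escaping=allow-utf-8
}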
+func (format Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(format), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + continue + } + key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) + if key == model.EscapingKey { + scheme, err := model.ToEscapingScheme(value) + if err != nil { + return model.NameEscapingScheme + } + return scheme + } + } + return model.NameEscapingScheme +} diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 21cdddcf05..11c8ff4b9d 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,47 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) +type encoderOption struct { + withCreatedLines bool + withUnit bool +} + +type EncoderOption func(*encoderOption) + +// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder +// to include _created lines (See +// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1). +// Created timestamps can improve the accuracy of series reset detection, but +// come with a bandwidth cost. +// +// At the time of writing, created timestamp ingestion is still experimental in +// Prometheus and need to be enabled with the feature-flag +// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are +// still possible. Therefore, it is recommended to use this feature with caution. +func WithCreatedLines() EncoderOption { + return func(t *encoderOption) { + t.withCreatedLines = true + } +} + +// WithUnit is an EncoderOption enabling a set unit to be written to the output +// and to be added to the metric name, if it's not there already, as a suffix. +// Without opting in this way, the unit will not be added to the metric name and, +// on top of that, the unit will not be passed onto the output, even if it +// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit !=nil. +func WithUnit() EncoderOption { + return func(t *encoderOption) { + t.withUnit = true + } +} + // MetricFamilyToOpenMetrics converts a MetricFamily proto message into the // OpenMetrics text format and writes the resulting lines to 'out'. It returns // the number of bytes written and any error encountered. The output will have @@ -35,6 +71,18 @@ import ( // sanity checks. If the input contains duplicate metrics or invalid metric or // label names, the conversion will result in invalid text format output. // +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// // This function fulfills the type 'expfmt.encoder'. 
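// Illustrative sketch (not part of the vendored patch): NewEncoder now accepts
// variadic EncoderOptions; WithCreatedLines and WithUnit are only honored by
// the OpenMetrics encoder and are ignored for other formats. The metric family
// is hypothetical.
package main

import (
	"log"
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name:   proto.String("example_events_total"),
		Help:   proto.String("Events seen by the example."),
		Type:   dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{Counter: &dto.Counter{Value: proto.Float64(7)}}},
	}

	enc := expfmt.NewEncoder(os.Stdout, expfmt.FmtOpenMetrics_1_0_0,
		expfmt.WithCreatedLines(), expfmt.WithUnit())
	if err := enc.Encode(mf); err != nil {
		log.Fatal(err)
	}
	if closer, ok := enc.(expfmt.Closer); ok {
		closer.Close() // emits the trailing "# EOF" line required by OpenMetrics
	}
}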
// // Note that OpenMetrics requires a final `# EOF` line. Since this function acts @@ -47,20 +95,34 @@ import ( // Prometheus to OpenMetrics or vice versa: // // - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, +// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT` +// lines. A counter with a missing `_total` suffix is not an error. However, // its type will be set to `unknown` in that case to avoid invalid OpenMetrics // output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - According to the OM specs, the `# UNIT` line is optional, but if populated, +// the unit has to be present in the metric name as its suffix: +// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit). +// However, in order to accommodate any potential scenario where such a change in the +// metric name is not desirable, the users are here given the choice of either explicitly +// opt in, in case they wish for the unit to be included in the output AND in the metric name +// as a suffix (see the description of the WithUnit function above), +// or not to opt in, in case they don't want for any of that to happen. +// +// - No support for the following (optional) features: info type, +// stateset type, gaugehistogram type. // // - The size of exemplar labels is not checked (i.e. it's possible to create // exemplars that are larger than allowed by the OpenMetrics specification). // // - The value of Counters is not checked. (OpenMetrics doesn't allow counters // with a `NaN` value.) -func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) { + toOM := encoderOption{} + for _, option := range options { + option(&toOM) + } + name := in.GetName() if name == "" { return 0, fmt.Errorf("MetricFamily has no name: %s", in) @@ -83,12 +145,15 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } var ( - n int - metricType = in.GetType() - shortName = name + n int + metricType = in.GetType() + compliantName = name ) - if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { - shortName = name[:len(name)-6] + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { + compliantName = name[:len(name)-6] + } + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { + compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) } // Comments, first HELP, then TYPE. 
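// Illustrative sketch (not part of the vendored patch): calling
// MetricFamilyToOpenMetrics directly with WithUnit and WithCreatedLines on a
// counter that carries a unit and a created timestamp. Names, values and the
// expected output shape are assumptions based on the code above; the final
// "# EOF" line still has to be written separately.
package main

import (
	"log"
	"os"
	"time"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("example_request_duration_seconds_total"),
		Help: proto.String("Total request duration."),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("seconds"),
		Metric: []*dto.Metric{{Counter: &dto.Counter{
			Value:            proto.Float64(42),
			CreatedTimestamp: timestamppb.New(time.Unix(1712345678, 0)),
		}}},
	}

	// The `_total` suffix is dropped from the # HELP/# TYPE/# UNIT lines, the
	// unit suffix is already part of the name so it is kept as-is, and a
	// `_created` sample is added. Expected shape, roughly:
	//   # HELP example_request_duration_seconds Total request duration.
	//   # TYPE example_request_duration_seconds counter
	//   # UNIT example_request_duration_seconds seconds
	//   example_request_duration_seconds_total 42.0
	//   example_request_duration_seconds_created 1712345678.0
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf,
		expfmt.WithUnit(), expfmt.WithCreatedLines()); err != nil {
		log.Fatal(err)
	}
}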
@@ -98,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, compliantName) written += n if err != nil { return @@ -124,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, compliantName) written += n if err != nil { return @@ -151,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } + if toOM.withUnit && in.Unit != nil { + n, err = w.WriteString("# UNIT ") + written += n + if err != nil { + return + } + n, err = writeName(w, compliantName) + written += n + if err != nil { + return + } + + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Unit, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + + var createdTsBytesWritten int // Finally the samples, one line for each. + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { + compliantName = compliantName + "_total" + } for _, metric := range in.Metric { switch metricType { case dto.MetricType_COUNTER: if metric.Counter == nil { return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, + "expected counter in metric %s %s", compliantName, metric, ) } - // Note that we have ensured above that either the name - // ends on `_total` or that the rendered type is - // `unknown`. Therefore, no `_total` must be added here. n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Counter.GetValue(), 0, false, metric.Counter.Exemplar, ) + if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_GAUGE: if metric.Gauge == nil { return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, + "expected gauge in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Gauge.GetValue(), 0, false, nil, ) case dto.MetricType_UNTYPED: if metric.Untyped == nil { return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, + "expected untyped in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Untyped.GetValue(), 0, false, nil, ) case dto.MetricType_SUMMARY: if metric.Summary == nil { return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, + "expected summary in metric %s %s", compliantName, metric, ) } for _, q := range metric.Summary.Quantile { n, err = writeOpenMetricsSample( - w, name, "", metric, + w, compliantName, "", metric, model.QuantileLabel, q.GetQuantile(), q.GetValue(), 0, false, nil, @@ -210,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Summary.GetSampleSum(), 0, false, nil, ) @@ -219,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = 
writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, metric.Summary.GetSampleCount(), true, nil, ) + if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, + "expected histogram in metric %s %s", compliantName, metric, ) } infSeen := false for _, b := range metric.Histogram.Bucket { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), 0, b.GetCumulativeCount(), true, b.Exemplar, @@ -247,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } if !infSeen { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, math.Inf(+1), 0, metric.Histogram.GetSampleCount(), true, nil, @@ -258,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Histogram.GetSampleSum(), 0, false, nil, ) @@ -267,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, metric.Histogram.GetSampleCount(), true, nil, ) + if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp()) + n += createdTsBytesWritten + } default: return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, + "unexpected type in metric %s %s", compliantName, metric, ) } written += n @@ -303,21 +410,9 @@ func writeOpenMetricsSample( floatValue float64, intValue uint64, useIntValue bool, exemplar *dto.Exemplar, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeOpenMetricsLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -350,7 +445,7 @@ func writeOpenMetricsSample( return written, err } } - if exemplar != nil { + if exemplar != nil && len(exemplar.Label) > 0 { n, err = writeExemplar(w, exemplar) written += n if err != nil { @@ -365,27 +460,58 @@ func writeOpenMetricsSample( return written, nil } -// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float -// in OpenMetrics style. -func writeOpenMetricsLabelPairs( +// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but +// formats the float in OpenMetrics style. 
+func writeOpenMetricsNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces, quoted. + if !model.IsValidLegacyMetricName(name) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -442,6 +568,49 @@ func writeOpenMetricsLabelPairs( return written, nil } +// writeOpenMetricsCreated writes the created timestamp for a single time series +// following OpenMetrics text format to w, given the metric name, the metric proto +// message itself, optionally a suffix to be removed, e.g. '_total' for counters, +// an additional label name with a float64 value (use empty string as label name if +// not required) and the timestamp that represents the created timestamp. +// The function returns the number of bytes written and any error encountered. +func writeOpenMetricsCreated(w enhancedWriter, + name, suffixToTrim string, metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + createdTimestamp *timestamppb.Timestamp, +) (int, error) { + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + // writeExemplar writes the provided exemplar in OpenMetrics format to w. The // function returns the number of bytes written and any error encountered. 
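// Illustrative sketch (not part of the vendored patch): when a metric or label
// name is valid UTF-8 but fails the legacy check, the writers quote it and move
// the metric name inside the braces. The dotted names below are hypothetical,
// and no escaping is applied because the family is written directly.
package main

import (
	"log"
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http.server.duration"),
		Help: proto.String("Duration of HTTP requests."),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{Name: proto.String("http.method"), Value: proto.String("GET")}},
			Gauge: &dto.Gauge{Value: proto.Float64(0.25)},
		}},
	}

	// The # HELP/# TYPE lines quote the name the same way; the sample line is
	// expected to look roughly like:
	//   {"http.server.duration","http.method"="GET"} 0.25
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf); err != nil {
		log.Fatal(err)
	}
}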
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { @@ -451,7 +620,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0) written += n if err != nil { return written, err diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 2946b8f1a6..4b86434b33 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -62,6 +62,18 @@ var ( // contains duplicate metrics or invalid metric or label names, the conversion // will result in invalid text format output. // +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// // This method fulfills the type 'prometheus.encoder'. func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { // Fail-fast checks. @@ -98,7 +110,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -124,7 +136,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -280,21 +292,9 @@ func writeSample( additionalLabelName string, additionalLabelValue float64, value float64, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -330,32 +330,64 @@ func writeSample( return written, nil } -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( +// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given metric name and additional label pair into text formatted as +// required by the text format and writes it to 'w'. 
An empty slice in +// combination with an empty string 'additionalLabelName' results in nothing +// being written. Otherwise, the label pairs are written, escaped as required by +// the text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. If the metric name is not +// legacy-valid, it will be put inside the brackets as well. Legacy-invalid +// label names will also be quoted. +func writeNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces. + if !model.IsValidLegacyMetricName(name) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -462,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) { numBufPool.Put(bp) return written, err } + +// writeName writes a string as-is if it complies with the legacy naming +// scheme, or escapes it in double quotes if not. +func writeName(w enhancedWriter, name string) (int, error) { + if model.IsValidLegacyMetricName(name) { + return w.WriteString(name) + } + var written int + var err error + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + var n int + n, err = writeEscapedString(w, name, true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211af..f085a923f6 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,9 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + // These indicate if the metric name from the current line being parsed is inside + // braces and if that metric name was found respectively. 
+ currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false + p.currentMetricInsideBracesIsPresent = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMetricInsideBracesIsPresent { + p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetricInsideBracesIsPresent = true + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. 
- } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. 
+ quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d58..0000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index a21b9d15dd..0000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. -func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 178fdbaf61..80d1fe944e 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -75,7 +75,12 @@ func (a *Alert) ResolvedAt(ts time.Time) bool { // Status returns the status of the alert. func (a *Alert) Status() AlertStatus { - if a.Resolved() { + return a.StatusAt(time.Now()) +} + +// StatusAt returns the status of the alert at the given timestamp. +func (a *Alert) StatusAt(ts time.Time) AlertStatus { + if a.ResolvedAt(ts) { return AlertResolved } return AlertFiring @@ -127,6 +132,17 @@ func (as Alerts) HasFiring() bool { return false } +// HasFiringAt returns true iff one of the alerts is not resolved +// at the time ts. 
+func (as Alerts) HasFiringAt(ts time.Time) bool { + for _, a := range as { + if !a.ResolvedAt(ts) { + return true + } + } + return false +} + // Status returns StatusFiring iff at least one of the alerts is firing. func (as Alerts) Status() AlertStatus { if as.HasFiring() { @@ -134,3 +150,12 @@ func (as Alerts) Status() AlertStatus { } return AlertResolved } + +// StatusAt returns StatusFiring iff at least one of the alerts is firing +// at the time ts. +func (as Alerts) StatusAt(ts time.Time) AlertStatus { + if as.HasFiringAt(ts) { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index ef89563354..73b7aa3e60 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,10 +97,27 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid is true iff the label name matches the pattern of LabelNameRE. This -// method, however, does not use LabelNameRE for the check but a much faster -// hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. func (ln LabelName) IsValid() bool { + if len(ln) == 0 { + return false + } + switch NameValidationScheme { + case LegacyValidation: + return ln.IsValidLegacy() + case UTF8Validation: + return utf8.ValidString(string(ln)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) + } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { if len(ln) == 0 { return false } @@ -164,7 +181,7 @@ func (l LabelNames) String() string { // A LabelValue is an associated value for a LabelName. type LabelValue string -// IsValid returns true iff the string is a valid UTF8. +// IsValid returns true iff the string is a valid UTF-8. func (lv LabelValue) IsValid() bool { return utf8.ValidString(string(lv)) } diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index 6eda08a739..d0ad88da33 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -17,7 +17,6 @@ import ( "encoding/json" "fmt" "sort" - "strings" ) // A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet @@ -129,16 +128,6 @@ func (l LabelSet) Merge(other LabelSet) LabelSet { return result } -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - // Fingerprint returns the LabelSet's fingerprint. 
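// Illustrative sketch (not part of the vendored patch): the new StatusAt and
// HasFiringAt methods evaluate alert resolution against an explicit timestamp
// instead of time.Now(). The alerts below are hypothetical.
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	alerts := model.Alerts{
		&model.Alert{
			Labels:   model.LabelSet{"alertname": "HighLatency"},
			StartsAt: now.Add(-10 * time.Minute),
			EndsAt:   now.Add(-1 * time.Minute), // already resolved at `now`
		},
		&model.Alert{
			Labels:   model.LabelSet{"alertname": "DiskFull"},
			StartsAt: now.Add(-5 * time.Minute), // zero EndsAt: never resolves
		},
	}

	fmt.Println(alerts[0].StatusAt(now)) // resolved
	fmt.Println(alerts.HasFiringAt(now)) // true, the second alert has no end time
	fmt.Println(alerts.StatusAt(now))    // firing
}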
func (ls LabelSet) Fingerprint() Fingerprint { return labelSetToFingerprint(ls) diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go new file mode 100644 index 0000000000..abb2c90018 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -0,0 +1,43 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "bytes" + "slices" + "strconv" +) + +// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically. +func (l LabelSet) String() string { + var lna [32]string // On stack to avoid memory allocation for sorting names. + labelNames := lna[:0] + for name := range l { + labelNames = append(labelNames, string(name)) + } + slices.Sort(labelNames) + var bytea [1024]byte // On stack to avoid memory allocation while building the output. + b := bytes.NewBuffer(bytea[:0]) + b.WriteByte('{') + for i, name := range labelNames { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(name) + b.WriteByte('=') + b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)]))) + } + b.WriteByte('}') + return b.String() +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index f8c5eabaa9..f50966bc49 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -18,6 +18,80 @@ import ( "regexp" "sort" "strings" + "unicode/utf8" + + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" +) + +var ( + // NameValidationScheme determines the method of name validation to be used by + // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode + // in isolation from other components that don't support UTF-8 may result in + // bugs or other undefined behavior. This value is intended to be set by + // UTF-8-aware binaries as part of their startup. To avoid need for locking, + // this value should be set once, ideally in an init(), before multiple + // goroutines are started. + NameValidationScheme = LegacyValidation + + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled. + NameEscapingScheme = UnderscoreEscaping +) + +// ValidationScheme is a Go enum for determining how metric and label names will +// be validated by this library. +type ValidationScheme int + +const ( + // LegacyValidation is a setting that requirets that metric and label names + // conform to the original Prometheus character requirements described by + // MetricNameRE and LabelNameRE. 
+ LegacyValidation ValidationScheme = iota + + // UTF8Validation only requires that metric and label names be valid UTF-8 + // strings. + UTF8Validation +) + +type EscapingScheme int + +const ( + // NoEscaping indicates that a name will not be escaped. Unescaped names that + // do not conform to the legacy validity check will use a new exposition + // format syntax that will be officially standardized in future versions. + NoEscaping EscapingScheme = iota + + // UnderscoreEscaping replaces all legacy-invalid characters with underscores. + UnderscoreEscaping + + // DotsEscaping is similar to UnderscoreEscaping, except that dots are + // converted to `_dot_` and pre-existing underscores are converted to `__`. + DotsEscaping + + // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid + // characters with the unicode value, surrounded by underscores. Single + // underscores are replaced with double underscores. + ValueEncodingEscaping +) + +const ( + // EscapingKey is the key in an Accept or Content-Type header that defines how + // metric and label names that do not conform to the legacy character + // requirements should be escaped when being scraped by a legacy prometheus + // system. If a system does not explicitly pass an escaping parameter in the + // Accept header, the default NameEscapingScheme will be used. + EscapingKey = "escaping" + + // Possible values for Escaping Key: + AllowUTF8 = "allow-utf-8" // No escaping required. + EscapeUnderscores = "underscores" + EscapeDots = "dots" + EscapeValues = "values" ) // MetricNameRE is a regular expression matching valid metric @@ -84,17 +158,303 @@ func (m Metric) FastFingerprint() Fingerprint { return LabelSet(m).FastFingerprint() } -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE +// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is +// selected. +func IsValidMetricName(n LabelValue) bool { + switch NameValidationScheme { + case LegacyValidation: + return IsValidLegacyMetricName(string(n)) + case UTF8Validation: + if len(n) == 0 { + return false + } + return utf8.ValidString(string(n)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) + } +} + +// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the +// legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + if !isValidLegacyRune(b, i) { return false } } return true } + +// EscapeMetricFamily escapes the given metric names and labels with the given +// escaping scheme. Returns a new object that uses the same pointers to fields +// when possible and creates new escaped versions so as not to mutate the +// input. +func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily { + if v == nil { + return nil + } + + if scheme == NoEscaping { + return v + } + + out := &dto.MetricFamily{ + Help: v.Help, + Type: v.Type, + Unit: v.Unit, + } + + // If the name is nil, copy as-is, don't try to escape. 
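The new package-level NameValidationScheme gates both IsValidMetricName and LabelName.IsValid. A sketch of opting into UTF-8 names once at startup, as the doc comment recommends; the metric and label names below are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func init() {
	// Opt in to UTF-8 names once, before any concurrent use of the model package.
	model.NameValidationScheme = model.UTF8Validation
}

func main() {
	fmt.Println(model.IsValidMetricName("http.server.request.duration"))       // true under UTF8Validation
	fmt.Println(model.IsValidLegacyMetricName("http.server.request.duration")) // false: '.' is not a legacy rune
	fmt.Println(model.LabelName("http.route").IsValid())                       // true under UTF8Validation
}
```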
+ if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { + out.Name = v.Name + } else { + out.Name = proto.String(EscapeName(v.GetName(), scheme)) + } + for _, m := range v.Metric { + if !metricNeedsEscaping(m) { + out.Metric = append(out.Metric, m) + continue + } + + escaped := &dto.Metric{ + Gauge: m.Gauge, + Counter: m.Counter, + Summary: m.Summary, + Untyped: m.Untyped, + Histogram: m.Histogram, + TimestampMs: m.TimestampMs, + } + + for _, l := range m.Label { + if l.GetName() == MetricNameLabel { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(MetricNameLabel), + Value: proto.String(EscapeName(l.GetValue(), scheme)), + }) + continue + } + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(EscapeName(l.GetName(), scheme)), + Value: l.Value, + }) + } + out.Metric = append(out.Metric, escaped) + } + return out +} + +func metricNeedsEscaping(m *dto.Metric) bool { + for _, l := range m.Label { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { + return true + } + if !IsValidLegacyMetricName(l.GetName()) { + return true + } + } + return false +} + +const ( + lowerhex = "0123456789abcdef" +) + +// EscapeName escapes the incoming name according to the provided escaping +// scheme. Depending on the rules of escaping, this may cause no change in the +// string that is returned. (Especially NoEscaping, which by definition is a +// noop). This function does not do any validation of the name. +func EscapeName(name string, scheme EscapingScheme) string { + if len(name) == 0 { + return name + } + var escaped strings.Builder + switch scheme { + case NoEscaping: + return name + case UnderscoreEscaping: + if IsValidLegacyMetricName(name) { + return name + } + for i, b := range name { + if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else { + escaped.WriteRune('_') + } + } + return escaped.String() + case DotsEscaping: + // Do not early return for legacy valid names, we still escape underscores. + for i, b := range name { + if b == '_' { + escaped.WriteString("__") + } else if b == '.' { + escaped.WriteString("_dot_") + } else if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else { + escaped.WriteRune('_') + } + } + return escaped.String() + case ValueEncodingEscaping: + if IsValidLegacyMetricName(name) { + return name + } + escaped.WriteString("U__") + for i, b := range name { + if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else if !utf8.ValidRune(b) { + escaped.WriteString("_FFFD_") + } else if b < 0x100 { + escaped.WriteRune('_') + for s := 4; s >= 0; s -= 4 { + escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) + } + escaped.WriteRune('_') + } else if b < 0x10000 { + escaped.WriteRune('_') + for s := 12; s >= 0; s -= 4 { + escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) + } + escaped.WriteRune('_') + } + } + return escaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +// lower function taken from strconv.atoi +func lower(c byte) byte { + return c | ('x' - 'X') +} + +// UnescapeName unescapes the incoming name according to the provided escaping +// scheme if possible. Some schemes are partially or totally non-roundtripable. +// If any error is enountered, returns the original input. 
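EscapeName applies the schemes defined earlier in this hunk to a single name. A sketch of what each scheme produces for one legacy-invalid name; the expected outputs are derived by reading the code above rather than from separate documentation, so treat them as illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	const name = "my.metric" // not a valid legacy name because of the '.'

	fmt.Println(model.EscapeName(name, model.UnderscoreEscaping))    // my_metric
	fmt.Println(model.EscapeName(name, model.DotsEscaping))          // my_dot_metric
	fmt.Println(model.EscapeName(name, model.ValueEncodingEscaping)) // U__my_2e_metric

	// UnderscoreEscaping is lossy and cannot be reversed; ValueEncodingEscaping round-trips.
	fmt.Println(model.UnescapeName("U__my_2e_metric", model.ValueEncodingEscaping)) // my.metric
}
```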
+func UnescapeName(name string, scheme EscapingScheme) string { + if len(name) == 0 { + return name + } + switch scheme { + case NoEscaping: + return name + case UnderscoreEscaping: + // It is not possible to unescape from underscore replacement. + return name + case DotsEscaping: + name = strings.ReplaceAll(name, "_dot_", ".") + name = strings.ReplaceAll(name, "__", "_") + return name + case ValueEncodingEscaping: + escapedName, found := strings.CutPrefix(name, "U__") + if !found { + return name + } + + var unescaped strings.Builder + TOP: + for i := 0; i < len(escapedName); i++ { + // All non-underscores are treated normally. + if escapedName[i] != '_' { + unescaped.WriteByte(escapedName[i]) + continue + } + i++ + if i >= len(escapedName) { + return name + } + // A double underscore is a single underscore. + if escapedName[i] == '_' { + unescaped.WriteByte('_') + continue + } + // We think we are in a UTF-8 code, process it. + var utf8Val uint + for j := 0; i < len(escapedName); j++ { + // This is too many characters for a utf8 value. + if j > 4 { + return name + } + // Found a closing underscore, convert to a rune, check validity, and append. + if escapedName[i] == '_' { + utf8Rune := rune(utf8Val) + if !utf8.ValidRune(utf8Rune) { + return name + } + unescaped.WriteRune(utf8Rune) + continue TOP + } + r := lower(escapedName[i]) + utf8Val *= 16 + if r >= '0' && r <= '9' { + utf8Val += uint(r) - '0' + } else if r >= 'a' && r <= 'f' { + utf8Val += uint(r) - 'a' + 10 + } else { + return name + } + i++ + } + // Didn't find closing underscore, invalid. + return name + } + return unescaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +func isValidLegacyRune(b rune, i int) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0) +} + +func (e EscapingScheme) String() string { + switch e { + case NoEscaping: + return AllowUTF8 + case UnderscoreEscaping: + return EscapeUnderscores + case DotsEscaping: + return EscapeDots + case ValueEncodingEscaping: + return EscapeValues + default: + panic(fmt.Sprintf("unknown format scheme %d", e)) + } +} + +func ToEscapingScheme(s string) (EscapingScheme, error) { + if s == "" { + return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + } + switch s { + case AllowUTF8: + return NoEscaping, nil + case EscapeUnderscores: + return UnderscoreEscaping, nil + case EscapeDots: + return DotsEscaping, nil + case EscapeValues: + return ValueEncodingEscaping, nil + default: + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) + } +} diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index a618ec24d8..2c8f4808c1 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -26,33 +26,28 @@ linters: - errcheck #- exhaustive #- funlen - - gas #- gochecknoinits - goconst - #- gocritic + - gocritic #- gocyclo - #- gofmt + - gofmt - goimports - - golint #- gomnd #- goprintffuncname - #- gosec - #- gosimple + - gosec + - gosimple - govet - ineffassign - - interfacer #- lll - - maligned - - megacheck - #- misspell + - misspell #- nakedret #- noctx - #- nolintlint + - nolintlint #- rowserrcheck #- scopelint - #- staticcheck + - staticcheck #- structcheck ! 
deprecated since v1.49.0; replaced by 'unused' - #- stylecheck + - stylecheck #- typecheck - unconvert #- unparam diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 5f965e057f..25c30e3ccc 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -17,21 +17,17 @@ package cobra import ( "fmt" "os" - "regexp" - "strings" ) const ( activeHelpMarker = "_activeHelp_ " // The below values should not be changed: programs will be using them explicitly // in their user documentation, and users will be using them explicitly. - activeHelpEnvVarSuffix = "_ACTIVE_HELP" - activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" + activeHelpEnvVarSuffix = "ACTIVE_HELP" + activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix activeHelpGlobalDisable = "0" ) -var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) - // AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. // Such strings will be processed by the completion script and will be shown as ActiveHelp // to the user. @@ -60,8 +56,5 @@ func GetActiveHelpConfig(cmd *Command) string { // variable. It has the format _ACTIVE_HELP where is the name of the // root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. func activeHelpEnvVar(name string) string { - // This format should not be changed: users will be using it explicitly. - activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") - return activeHelpEnvVar + return configEnvVar(name, activeHelpEnvVarSuffix) } diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index e79ec33a81..ed1e70ceaa 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -52,9 +52,9 @@ func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { // Remove any description that may be included in ValidArgs. // A description is following a tab character. 
- var validArgs []string + validArgs := make([]string, 0, len(cmd.ValidArgs)) for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) + validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0]) } for _, v := range args { if !stringInSlice(v, validArgs) { diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 8a53151840..f4d198cbcb 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -597,19 +597,16 @@ func writeRequiredFlag(buf io.StringWriter, cmd *Command) { if nonCompletableFlag(flag) { return } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } + if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok { + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } }) @@ -621,7 +618,7 @@ func writeRequiredNouns(buf io.StringWriter, cmd *Command) { for _, value := range cmd.ValidArgs { // Remove any description that may be included following a tab character. // Descriptions are not supported by bash completion. - value = strings.Split(value, "\t")[0] + value = strings.SplitN(value, "\t", 2)[0] WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } if cmd.ValidArgsFunction != nil { diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index a6b160ce53..e0b0947b04 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -193,8 +193,6 @@ func ld(s, t string, ignoreCase bool) int { d := make([][]int, len(s)+1) for i := range d { d[i] = make([]int, len(t)+1) - } - for i := range d { d[i][0] = i } for j := range d[0] { diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 2fbe6c131a..54748fc67e 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -154,8 +154,10 @@ type Command struct { // pflags contains persistent flags. pflags *flag.FlagSet // lflags contains local flags. + // This field does not represent internal state, it's used as a cache to optimise LocalFlags function call lflags *flag.FlagSet // iflags contains inherited flags. + // This field does not represent internal state, it's used as a cache to optimise InheritedFlags function call iflags *flag.FlagSet // parentsPflags is all persistent flags of cmd's parents. parentsPflags *flag.FlagSet @@ -706,7 +708,7 @@ Loop: // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, // return the args, excluding the one at this position. if s == x { - ret := []string{} + ret := make([]string, 0, len(args)-1) ret = append(ret, args[:pos]...) ret = append(ret, args[pos+1:]...) 
return ret @@ -754,14 +756,14 @@ func (c *Command) findSuggestions(arg string) string { if c.SuggestionsMinimumDistance <= 0 { c.SuggestionsMinimumDistance = 2 } - suggestionsString := "" + var sb strings.Builder if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" + sb.WriteString("\n\nDid you mean this?\n") for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) + _, _ = fmt.Fprintf(&sb, "\t%v\n", s) } } - return suggestionsString + return sb.String() } func (c *Command) findNext(next string) *Command { @@ -873,7 +875,7 @@ func (c *Command) ArgsLenAtDash() int { func (c *Command) execute(a []string) (err error) { if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") + return fmt.Errorf("called Execute() on a nil Command") } if len(c.Deprecated) > 0 { @@ -1187,10 +1189,11 @@ func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() if c.Flags().Lookup("help") == nil { usage := "help for " - if c.Name() == "" { + name := c.displayName() + if name == "" { usage += "this command" } else { - usage += c.Name() + usage += name } c.Flags().BoolP("help", "h", false, usage) _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) @@ -1236,7 +1239,7 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, +Simply type ` + c.displayName() + ` help [path to command] for full details.`, ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { var completions []string cmd, _, e := c.Root().Find(args) @@ -1427,6 +1430,10 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + return c.displayName() +} + +func (c *Command) displayName() string { if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { return displayName } @@ -1436,10 +1443,11 @@ func (c *Command) CommandPath() string { // UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { var useline string + use := strings.Replace(c.Use, c.Name(), c.displayName(), 1) if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use + useline = c.parent.CommandPath() + " " + use } else { - useline = c.Use + useline = use } if c.DisableFlagsInUseLine { return useline @@ -1452,7 +1460,6 @@ func (c *Command) UseLine() string { // DebugFlags used to determine which flags have been assigned to which commands // and which persist. -// nolint:goconst func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) @@ -1642,7 +1649,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f // to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1653,10 +1660,11 @@ func (c *Command) Flags() *flag.FlagSet { } // LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. 
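Several of the cobra changes in this hunk route naming through displayName, so a root command annotated with CommandDisplayNameAnnotation now shows that name in usage lines, default help text, and the generated flag sets. A hedged sketch, with a hypothetical plugin-style display name:

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{
		Use: "myplugin",
		// With this annotation, UseLine, the default help flag/command, and
		// flag error buffers refer to "kubectl myplugin" instead of the bare
		// command name. Both names here are hypothetical.
		Annotations: map[string]string{
			cobra.CommandDisplayNameAnnotation: "kubectl myplugin",
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	_ = root.Execute()
}
```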
func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.LocalFlags().VisitAll(func(f *flag.Flag) { if persistentFlags.Lookup(f.Name) == nil { out.AddFlag(f) @@ -1666,11 +1674,12 @@ func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { } // LocalFlags returns the local FlagSet specifically set in the current command. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1693,11 +1702,12 @@ func (c *Command) LocalFlags() *flag.FlagSet { } // InheritedFlags returns all flags which were inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1718,6 +1728,7 @@ func (c *Command) InheritedFlags() *flag.FlagSet { } // NonInheritedFlags returns all flags which were not inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) NonInheritedFlags() *flag.FlagSet { return c.LocalFlags() } @@ -1725,7 +1736,7 @@ func (c *Command) NonInheritedFlags() *flag.FlagSet { // PersistentFlags returns the persistent FlagSet specifically set in the current command. func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1738,9 +1749,9 @@ func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1857,7 +1868,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new. 
func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index b60f6b2000..c0c08b0572 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -17,6 +17,8 @@ package cobra import ( "fmt" "os" + "regexp" + "strconv" "strings" "sync" @@ -211,24 +213,29 @@ func (c *Command) initCompleteCmd(args []string) { // 2- Even without completions, we need to print the directive } - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) + noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd + if !noDescriptions { + if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil { + noDescriptions = !doDescriptions + } + } + noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable + out := finalCmd.OutOrStdout() for _, comp := range completions { - if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable { - // Remove all activeHelp entries in this case - if strings.HasPrefix(comp, activeHelpMarker) { - continue - } + if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) { + // Remove all activeHelp entries if it's disabled. + continue } if noDescriptions { // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] + comp = strings.SplitN(comp, "\t", 2)[0] } // Make sure we only write the first line to the output. // This is needed if a description contains a linebreak. // Otherwise the shell scripts will interpret the other lines as new flags // and could therefore provide a wrong completion. - comp = strings.Split(comp, "\n")[0] + comp = strings.SplitN(comp, "\n", 2)[0] // Finally trim the completion. This is especially important to get rid // of a trailing tab when there are no description following it. @@ -237,14 +244,14 @@ func (c *Command) initCompleteCmd(args []string) { // although there is no description). comp = strings.TrimSpace(comp) - // Print each possible completion to stdout for the completion script to consume. - fmt.Fprintln(finalCmd.OutOrStdout(), comp) + // Print each possible completion to the output for the completion script to consume. + fmt.Fprintln(out, comp) } // As the last printout, print the completion directive for the completion script to parse. // The directive integer must be that last character following a single colon (:). // The completion script expects : - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) + fmt.Fprintf(out, ":%d\n", directive) // Print some helpful info to stderr for the user to understand. // Output from stderr must be ignored by the completion script. @@ -291,7 +298,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } if err != nil { // Unable to find the real command. 
E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) + return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) } finalCmd.ctx = c.ctx @@ -899,3 +906,34 @@ func CompError(msg string) { func CompErrorln(msg string) { CompError(fmt.Sprintf("%s\n", msg)) } + +// These values should not be changed: users will be using them explicitly. +const ( + configEnvVarGlobalPrefix = "COBRA" + configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS" +) + +var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) + +// configEnvVar returns the name of the program-specific configuration environment +// variable. It has the format _ where is the name of the +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. +func configEnvVar(name, suffix string) string { + // This format should not be changed: users will be using it explicitly. + v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix)) + v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_") + return v +} + +// getEnvConfig returns the value of the configuration environment variable +// _ where is the name of the root command in upper +// case, with all non-ASCII-alphanumeric characters replaced by `_`. +// If the value is empty or not set, the value of the environment variable +// COBRA_ is returned instead. +func getEnvConfig(cmd *Command, suffix string) string { + v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix)) + if v == "" { + v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix)) + } + return v +} diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go index b8c15ce885..2138f24882 100644 --- a/vendor/github.com/spf13/cobra/doc/man_docs.go +++ b/vendor/github.com/spf13/cobra/doc/man_docs.go @@ -133,7 +133,7 @@ func fillHeader(header *GenManHeader, name string, disableAutoGen bool) error { } header.Date = &now } - header.date = (*header.Date).Format("Jan 2006") + header.date = header.Date.Format("Jan 2006") if header.Source == "" && !disableAutoGen { header.Source = "Auto generated by spf13/cobra" } diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go index f98fe2a3b8..12592223ba 100644 --- a/vendor/github.com/spf13/cobra/doc/md_docs.go +++ b/vendor/github.com/spf13/cobra/doc/md_docs.go @@ -128,7 +128,7 @@ func GenMarkdownTree(cmd *cobra.Command, dir string) error { return GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) } -// GenMarkdownTreeCustom is the the same as GenMarkdownTree, but +// GenMarkdownTreeCustom is the same as GenMarkdownTree, but // with custom filePrepender and linkHandler. func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { for _, c := range cmd.Commands() { diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go index 2cca6fd778..c33acc2baa 100644 --- a/vendor/github.com/spf13/cobra/doc/rest_docs.go +++ b/vendor/github.com/spf13/cobra/doc/rest_docs.go @@ -140,7 +140,7 @@ func GenReSTTree(cmd *cobra.Command, dir string) error { return GenReSTTreeCustom(cmd, dir, emptyStr, defaultLinkHandler) } -// GenReSTTreeCustom is the the same as GenReSTTree, but +// GenReSTTreeCustom is the same as GenReSTTree, but // with custom filePrepender and linkHandler. 
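The configEnvVar/getEnvConfig pair added here defines how per-program completion settings are looked up. The helper below is a copy of the unexported naming rule, reproduced only to illustrate which environment variables a hypothetical root command called "my-ctl" would honour; it is not part of cobra's public API:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Mirror of cobra's unexported configEnvVar, for illustration only:
// upper-case the name and replace anything outside [A-Z0-9_] with '_'.
func configEnvVar(name, suffix string) string {
	v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix))
	return regexp.MustCompile(`[^A-Z0-9_]`).ReplaceAllString(v, "_")
}

func main() {
	fmt.Println(configEnvVar("my-ctl", "COMPLETION_DESCRIPTIONS")) // MY_CTL_COMPLETION_DESCRIPTIONS
	fmt.Println(configEnvVar("my-ctl", "ACTIVE_HELP"))             // MY_CTL_ACTIVE_HELP
	fmt.Println(configEnvVar("COBRA", "COMPLETION_DESCRIPTIONS"))  // COBRA_COMPLETION_DESCRIPTIONS (global fallback)
}
```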
func GenReSTTreeCustom(cmd *cobra.Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { for _, c := range cmd.Commands() { diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go index 0aaa07a166..4de4ceee87 100644 --- a/vendor/github.com/spf13/cobra/doc/util.go +++ b/vendor/github.com/spf13/cobra/doc/util.go @@ -40,7 +40,7 @@ func hasSeeAlso(cmd *cobra.Command) bool { // that do not contain \n. func forceMultiLine(s string) string { if len(s) > 60 && !strings.Contains(s, "\n") { - s = s + "\n" + s += "\n" } return s } diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index 0671ec5f20..560612fd33 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -23,9 +23,9 @@ import ( ) const ( - requiredAsGroup = "cobra_annotation_required_if_others_set" - oneRequired = "cobra_annotation_one_required" - mutuallyExclusive = "cobra_annotation_mutually_exclusive" + requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set" + oneRequiredAnnotation = "cobra_annotation_one_required" + mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive" ) // MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors @@ -37,7 +37,7 @@ func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) } - if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found. panic(err) } @@ -53,7 +53,7 @@ func (c *Command) MarkFlagsOneRequired(flagNames ...string) { if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) } - if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found. panic(err) } @@ -70,7 +70,7 @@ func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) } // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed. 
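The flag-group annotation constants are renamed internally, but the public helpers keep their behaviour. A sketch of the three group markers on a hypothetical command; the flag names are illustrative:

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{Use: "serve", Run: func(*cobra.Command, []string) {}}
	cmd.Flags().String("cert", "", "TLS certificate")
	cmd.Flags().String("key", "", "TLS key")
	cmd.Flags().String("json", "", "JSON config file")
	cmd.Flags().String("yaml", "", "YAML config file")

	// --cert and --key must be given together.
	cmd.MarkFlagsRequiredTogether("cert", "key")
	// At least one of --json/--yaml is required...
	cmd.MarkFlagsOneRequired("json", "yaml")
	// ...but not both.
	cmd.MarkFlagsMutuallyExclusive("json", "yaml")

	_ = cmd.Execute()
}
```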
- if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil { panic(err) } } @@ -91,9 +91,9 @@ func (c *Command) ValidateFlagGroups() error { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} flags.VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) if err := validateRequiredFlagGroups(groupStatus); err != nil { @@ -130,7 +130,7 @@ func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annota continue } - groupStatus[group] = map[string]bool{} + groupStatus[group] = make(map[string]bool, len(flagnames)) for _, name := range flagnames { groupStatus[group][name] = false } @@ -232,9 +232,9 @@ func (c *Command) enforceFlagGroupsForCompletion() { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} c.Flags().VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) // If a flag that is part of a group is present, we make all the other flags @@ -253,17 +253,17 @@ func (c *Command) enforceFlagGroupsForCompletion() { // If none of the flags of a one-required group are present, we make all the flags // of that group required so that the shell completion suggests them automatically for flagList, flagnameAndStatus := range oneRequiredGroupStatus { - set := 0 + isSet := false - for _, isSet := range flagnameAndStatus { + for _, isSet = range flagnameAndStatus { if isSet { - set++ + break } } // None of the flags of the group are set, mark all flags in the group // as required - if set == 0 { + if !isSet { for _, fName := range strings.Split(flagList, " ") { _ = c.MarkFlagRequired(fName) } diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 5519519394..a830b7bcad 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -28,8 +28,8 @@ import ( func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { // Variables should not contain a '-' or ':' character nameForVar := name - nameForVar = strings.Replace(nameForVar, "-", "_", -1) - nameForVar = strings.Replace(nameForVar, ":", "_", -1) + nameForVar = 
strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") compCmd := ShellCompRequestCmd if !includeDesc { diff --git a/vendor/github.com/x448/float16/.travis.yml b/vendor/github.com/x448/float16/.travis.yml new file mode 100644 index 0000000000..8902bdaaff --- /dev/null +++ b/vendor/github.com/x448/float16/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.11.x + +env: + - GO111MODULE=on + +script: + - go test -short -coverprofile=coverage.txt -covermode=count ./... + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/x448/float16/LICENSE b/vendor/github.com/x448/float16/LICENSE new file mode 100644 index 0000000000..bf6e357854 --- /dev/null +++ b/vendor/github.com/x448/float16/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/x448/float16/README.md b/vendor/github.com/x448/float16/README.md new file mode 100644 index 0000000000..b524b8135d --- /dev/null +++ b/vendor/github.com/x448/float16/README.md @@ -0,0 +1,133 @@ +# Float16 (Binary16) in Go/Golang +[![Build Status](https://travis-ci.org/x448/float16.svg?branch=master)](https://travis-ci.org/x448/float16) +[![codecov](https://codecov.io/gh/x448/float16/branch/master/graph/badge.svg?v=4)](https://codecov.io/gh/x448/float16) +[![Go Report Card](https://goreportcard.com/badge/github.com/x448/float16)](https://goreportcard.com/report/github.com/x448/float16) +[![Release](https://img.shields.io/github/release/x448/float16.svg?style=flat-square)](https://github.com/x448/float16/releases) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/x448/float16/master/LICENSE) + +`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16. + +IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result. + +All possible 4+ billion floating-point conversions with this library are verified to be correct. + +Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library. 
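A self-contained sketch of the conversions this README describes, assuming only the exported API listed further down (Fromfloat32, Float32, Bits, PrecisionFromfloat32); the printed values follow from rounding math.Pi to binary16:

```go
package main

import (
	"fmt"
	"math"

	"github.com/x448/float16"
)

func main() {
	pi := float32(math.Pi)

	// Check up front whether the conversion would be exact.
	if float16.PrecisionFromfloat32(pi) != float16.PrecisionExact {
		fmt.Println("converting pi drops significand bits (binary16 keeps only 10 of them)")
	}

	half := float16.Fromfloat32(pi) // rounds to nearest, ties to even
	back := half.Float32()          // lossless widening back to float32

	fmt.Printf("bits=%#04x back=%v\n", half.Bits(), back) // bits=0x4248 back=3.140625
}
```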
+ +## Features +Current features include: + +* float16 to float32 conversions use lossless conversion. +* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven". +* conversions using pure Go take about 2.65 ns/op on a desktop amd64. +* unit tests provide 100% code coverage and check all possible 4+ billion conversions. +* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc. +* all functions in this library use zero allocs except String(). + +## Status +This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published. + +Current status: + +* core API is done and breaking API changes are unlikely. +* 100% of unit tests pass: + * short mode (`go test -short`) tests around 65765 conversions in 0.005s. + * normal mode (`go test`) tests all possible 4+ billion conversions in about 95s. +* 100% code coverage with both short mode and normal mode. +* tested on amd64 but it should work on all little-endian platforms supported by Go. + +Roadmap: + +* add functions for fast batch conversions leveraging SIMD when supported by hardware. +* speed up unit test when verifying all possible 4+ billion conversions. +* test on additional platforms. + +## Float16 to Float32 Conversion +Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct. + +Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions. + +## Float32 to Float16 Conversion +Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct. + +Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32(). + +Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage. + +## Usage +Install with `go get github.com/x448/float16`. +``` +// Convert float32 to float16 +pi := float32(math.Pi) +pi16 := float16.Fromfloat32(pi) + +// Convert float16 to float32 +pi32 := pi16.Float32() + +// PrecisionFromfloat32() is faster than the overhead of calling a function. +// This example only converts if there's no data loss and input is not a subnormal. +if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact { + pi16 := float16.Fromfloat32(pi) +} +``` + +## Float16 Type and API +Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods. +``` +package float16 // import "github.com/x448/float16" + +// Exported types and consts +type Float16 uint16 +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +// Exported functions +Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding + with identical results to AMD and Intel F16C hardware. NaN inputs + are converted with quiet bit always set on, to be like F16C. + +FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit. + // The "ps" suffix means "preserve signaling". + // Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN. 
+ +Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.) +NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number +Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign + +PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow + // (inline and < 1 ns/op) +// Exported methods +(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion +(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f +(f Float16) IsNaN() bool // true if f is not-a-number (NaN) +(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN) +(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf) +(f Float16) IsFinite() bool // true if f is not infinite or NaN +(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN. +(f Float16) Signbit() bool // true if f is negative or negative zero +(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface +``` +See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info. + +## Benchmarks +Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value. + +``` +All functions have zero allocations except float16.String(). + +FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16 +ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32 +Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16 + +PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc. +``` + +## System Requirements +* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions. +* Tested on amd64 but it should also work on all little-endian platforms supported by Go. + +## Special Thanks +Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16. + +## License +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Licensed under [MIT License](LICENSE) diff --git a/vendor/github.com/x448/float16/float16.go b/vendor/github.com/x448/float16/float16.go new file mode 100644 index 0000000000..1a0e6dad00 --- /dev/null +++ b/vendor/github.com/x448/float16/float16.go @@ -0,0 +1,302 @@ +// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker +// +// Special thanks to Kathryn Long for her Rust implementation +// of float16 at github.com/starkat99/half-rs (MIT license) + +package float16 + +import ( + "math" + "strconv" +) + +// Float16 represents IEEE 754 half-precision floating-point numbers (binary16). +type Float16 uint16 + +// Precision indicates whether the conversion to Float16 is +// exact, subnormal without dropped bits, inexact, underflow, or overflow. +type Precision int + +const ( + + // PrecisionExact is for non-subnormals that don't drop bits during conversion. + // All of these can round-trip. Should always convert to float16. + PrecisionExact Precision = iota + + // PrecisionUnknown is for subnormals that don't drop bits during conversion but + // not all of these can round-trip so precision is unknown without more effort. + // Only 2046 of these can round-trip and the rest cannot round-trip. 
+ PrecisionUnknown + + // PrecisionInexact is for dropped significand bits and cannot round-trip. + // Some of these are subnormals. Cannot round-trip float32->float16->float32. + PrecisionInexact + + // PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32. + PrecisionUnderflow + + // PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32. + PrecisionOverflow +) + +// PrecisionFromfloat32 returns Precision without performing +// the conversion. Conversions from both Infinity and NaN +// values will always report PrecisionExact even if NaN payload +// or NaN-Quiet-Bit is lost. This function is kept simple to +// allow inlining and run < 0.5 ns/op, to serve as a fast filter. +func PrecisionFromfloat32(f32 float32) Precision { + u32 := math.Float32bits(f32) + + if u32 == 0 || u32 == 0x80000000 { + // +- zero will always be exact conversion + return PrecisionExact + } + + const COEFMASK uint32 = 0x7fffff // 23 least significant bits + const EXPSHIFT uint32 = 23 + const EXPBIAS uint32 = 127 + const EXPMASK uint32 = uint32(0xff) << EXPSHIFT + const DROPMASK uint32 = COEFMASK >> 10 + + exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS) + coef := u32 & COEFMASK + + if exp == 128 { + // +- infinity or NaN + // apps may want to do extra checks for NaN separately + return PrecisionExact + } + + // https://en.wikipedia.org/wiki/Half-precision_floating-point_format says, + // "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24" + if exp < -24 { + return PrecisionUnderflow + } + if exp > 15 { + return PrecisionOverflow + } + if (coef & DROPMASK) != uint32(0) { + // these include subnormals and non-subnormals that dropped bits + return PrecisionInexact + } + + if exp < -14 { + // Subnormals. Caller may want to test these further. + // There are 2046 subnormals that can successfully round-trip f32->f16->f32 + // and 20 of those 2046 have 32-bit input coef == 0. + // RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value" + // so some protocols and libraries will choose to handle subnormals differently + // when deciding to encode them to CBOR float32 vs float16. + return PrecisionUnknown + } + + return PrecisionExact +} + +// Frombits returns the float16 number corresponding to the IEEE 754 binary16 +// representation u16, with the sign bit of u16 and the result in the same bit +// position. Frombits(Bits(x)) == x. +func Frombits(u16 uint16) Float16 { + return Float16(u16) +} + +// Fromfloat32 returns a Float16 value converted from f32. Conversion uses +// IEEE default rounding (nearest int, with ties to even). +func Fromfloat32(f32 float32) Float16 { + return Float16(f32bitsToF16bits(math.Float32bits(f32))) +} + +// ErrInvalidNaNValue indicates a NaN was not received. +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +type float16Error string + +func (e float16Error) Error() string { return string(e) } + +// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both +// signaling and payload. Unlike Fromfloat32(), which can only return +// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN. +// If the result is infinity (sNaN with empty payload), then the +// lowest bit of payload is set to make the result a NaN. +// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN. +// This function was kept simple to be able to inline. 
+func FromNaN32ps(nan float32) (Float16, error) { + const SNAN = Float16(uint16(0x7c01)) // signalling NaN + + u32 := math.Float32bits(nan) + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if (exp != 0x7f800000) || (coef == 0) { + return SNAN, ErrInvalidNaNValue + } + + u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13)) + + if (u16 & 0x03ff) == 0 { + // result became infinity, make it NaN by setting lowest bit in payload + u16 = u16 | 0x0001 + } + + return Float16(u16), nil +} + +// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN). +// Returned NaN value 0x7e01 has all exponent bits = 1 with the +// first and last bits = 1 in the significand. This is consistent +// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00. +func NaN() Float16 { + return Float16(0x7e01) +} + +// Inf returns a Float16 with an infinity value with the specified sign. +// A sign >= returns positive infinity. +// A sign < 0 returns negative infinity. +func Inf(sign int) Float16 { + if sign >= 0 { + return Float16(0x7c00) + } + return Float16(0x8000 | 0x7c00) +} + +// Float32 returns a float32 converted from f (Float16). +// This is a lossless conversion. +func (f Float16) Float32() float32 { + u32 := f16bitsToF32bits(uint16(f)) + return math.Float32frombits(u32) +} + +// Bits returns the IEEE 754 binary16 representation of f, with the sign bit +// of f and the result in the same bit position. Bits(Frombits(x)) == x. +func (f Float16) Bits() uint16 { + return uint16(f) +} + +// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value. +func (f Float16) IsNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) +} + +// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16 +// “not-a-number” value. +func (f Float16) IsQuietNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0) +} + +// IsInf reports whether f is an infinity (inf). +// A sign > 0 reports whether f is positive inf. +// A sign < 0 reports whether f is negative inf. +// A sign == 0 reports whether f is either inf. +func (f Float16) IsInf(sign int) bool { + return ((f == 0x7c00) && sign >= 0) || + (f == 0xfc00 && sign <= 0) +} + +// IsFinite returns true if f is neither infinite nor NaN. +func (f Float16) IsFinite() bool { + return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00) +} + +// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN. +func (f Float16) IsNormal() bool { + exp := uint16(f) & uint16(0x7c00) + return (exp != uint16(0x7c00)) && (exp != 0) +} + +// Signbit reports whether f is negative or negative zero. +func (f Float16) Signbit() bool { + return (uint16(f) & uint16(0x8000)) != 0 +} + +// String satisfies the fmt.Stringer interface. +func (f Float16) String() string { + return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32) +} + +// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16. +func f16bitsToF32bits(in uint16) uint32 { + // All 65536 conversions with this were confirmed to be correct + // by Montgomery Edwards⁴⁴⁸ (github.com/x448). 
+ + sign := uint32(in&0x8000) << 16 // sign for 32-bit + exp := uint32(in&0x7c00) >> 10 // exponenent for 16-bit + coef := uint32(in&0x03ff) << 13 // significand for 32-bit + + if exp == 0x1f { + if coef == 0 { + // infinity + return sign | 0x7f800000 | coef + } + // NaN + return sign | 0x7fc00000 | coef + } + + if exp == 0 { + if coef == 0 { + // zero + return sign + } + + // normalize subnormal numbers + exp++ + for coef&0x7f800000 == 0 { + coef <<= 1 + exp-- + } + coef &= 0x007fffff + } + + return sign | ((exp + (0x7f - 0xf)) << 23) | coef +} + +// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32. +// Conversion rounds to nearest integer with ties to even. +func f32bitsToF16bits(u32 uint32) uint16 { + // Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448). + // All 4294967296 conversions with this were confirmed to be correct by x448. + // Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license. + + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if exp == 0x7f800000 { + // NaN or Infinity + nanBit := uint32(0) + if coef != 0 { + nanBit = uint32(0x0200) + } + return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13)) + } + + halfSign := sign >> 16 + + unbiasedExp := int32(exp>>23) - 127 + halfExp := unbiasedExp + 15 + + if halfExp >= 0x1f { + return uint16(halfSign | uint32(0x7c00)) + } + + if halfExp <= 0 { + if 14-halfExp > 24 { + return uint16(halfSign) + } + coef := coef | uint32(0x00800000) + halfCoef := coef >> uint32(14-halfExp) + roundBit := uint32(1) << uint32(13-halfExp) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + halfCoef++ + } + return uint16(halfSign | halfCoef) + } + + uHalfExp := uint32(halfExp) << 10 + halfCoef := coef >> 13 + roundBit := uint32(0x00001000) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + return uint16((halfSign | uHalfExp | halfCoef) + 1) + } + return uint16(halfSign | uHalfExp | halfCoef) +} diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index 6b5177fc3c..36719c1920 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
MinClusterVersion = "3.0.0" - Version = "3.5.14" + Version = "3.5.16" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go index 34f35b9f28..f0f3739aad 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go @@ -58,7 +58,7 @@ var DefaultZapLoggerConfig = zap.Config{ // Custom EncodeTime function to ensure we match format and precision of historic capnslog timestamps EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) { - enc.AppendString(t.Format("2006-01-02T15:04:05.999999Z0700")) + enc.AppendString(t.Format("2006-01-02T15:04:05.000000Z0700")) }, EncodeDuration: zapcore.StringDurationEncoder, diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go index 150545d08d..a7d37688d9 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go @@ -180,12 +180,23 @@ type TLSInfo struct { parseFunc func([]byte, []byte) (tls.Certificate, error) // AllowedCN is a CN which must be provided by a client. + // + // Deprecated: use AllowedCNs instead. AllowedCN string // AllowedHostname is an IP address or hostname that must match the TLS // certificate provided by a client. + // + // Deprecated: use AllowedHostnames instead. AllowedHostname string + // AllowedCNs is a list of acceptable CNs which must be provided by a client. + AllowedCNs []string + + // AllowedHostnames is a list of acceptable IP addresses or hostnames that must match the + // TLS certificate provided by a client. + AllowedHostnames []string + // Logger logs TLS errors. // If nil, all logs are discarded. Logger *zap.Logger @@ -407,19 +418,52 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { // Client certificates may be verified by either an exact match on the CN, // or a more general check of the CN and SANs. 
var verifyCertificate func(*x509.Certificate) bool + + if info.AllowedCN != "" && len(info.AllowedCNs) > 0 { + return nil, fmt.Errorf("AllowedCN and AllowedCNs are mutually exclusive (cn=%q, cns=%q)", info.AllowedCN, info.AllowedCNs) + } + if info.AllowedHostname != "" && len(info.AllowedHostnames) > 0 { + return nil, fmt.Errorf("AllowedHostname and AllowedHostnames are mutually exclusive (hostname=%q, hostnames=%q)", info.AllowedHostname, info.AllowedHostnames) + } + if info.AllowedCN != "" && info.AllowedHostname != "" { + return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) + } + if len(info.AllowedCNs) > 0 && len(info.AllowedHostnames) > 0 { + return nil, fmt.Errorf("AllowedCNs and AllowedHostnames are mutually exclusive (cns=%q, hostnames=%q)", info.AllowedCNs, info.AllowedHostnames) + } + if info.AllowedCN != "" { - if info.AllowedHostname != "" { - return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) - } + info.Logger.Warn("AllowedCN is deprecated, use AllowedCNs instead") verifyCertificate = func(cert *x509.Certificate) bool { return info.AllowedCN == cert.Subject.CommonName } } if info.AllowedHostname != "" { + info.Logger.Warn("AllowedHostname is deprecated, use AllowedHostnames instead") verifyCertificate = func(cert *x509.Certificate) bool { return cert.VerifyHostname(info.AllowedHostname) == nil } } + if len(info.AllowedCNs) > 0 { + verifyCertificate = func(cert *x509.Certificate) bool { + for _, allowedCN := range info.AllowedCNs { + if allowedCN == cert.Subject.CommonName { + return true + } + } + return false + } + } + if len(info.AllowedHostnames) > 0 { + verifyCertificate = func(cert *x509.Certificate) bool { + for _, allowedHostname := range info.AllowedHostnames { + if cert.VerifyHostname(allowedHostname) == nil { + return true + } + } + return false + } + } if verifyCertificate != nil { cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { for _, chains := range verifiedChains { diff --git a/vendor/go.etcd.io/etcd/client/v3/auth.go b/vendor/go.etcd.io/etcd/client/v3/auth.go index a6f75d3215..110918a4c7 100644 --- a/vendor/go.etcd.io/etcd/client/v3/auth.go +++ b/vendor/go.etcd.io/etcd/client/v3/auth.go @@ -134,67 +134,67 @@ func NewAuthFromAuthClient(remote pb.AuthClient, c *Client) Auth { func (auth *authClient) Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthenticateResponse)(resp), toErr(ctx, err) + return (*AuthenticateResponse)(resp), ContextError(ctx, err) } func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) - return (*AuthEnableResponse)(resp), toErr(ctx, err) + return (*AuthEnableResponse)(resp), ContextError(ctx, err) } func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) 
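The new list-valued AllowedCNs/AllowedHostnames replace the deprecated singular fields, and mixing old and new now returns an error, as the checks above show. A hedged sketch of pinning client CNs on the server side; the file paths and CN values are hypothetical:

```go
package main

import (
	"log"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	tlsInfo := transport.TLSInfo{
		CertFile:      "/etc/etcd/server.crt", // hypothetical paths
		KeyFile:       "/etc/etcd/server.key",
		TrustedCAFile: "/etc/etcd/ca.crt",
		// Accept clients whose certificate CN matches any of these values.
		// Setting the deprecated AllowedCN at the same time is rejected.
		AllowedCNs: []string{"etcd-client", "backup-agent"},
	}

	cfg, err := tlsInfo.ServerConfig()
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // pass to an HTTP or gRPC server's TLS configuration
}
```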
- return (*AuthDisableResponse)(resp), toErr(ctx, err) + return (*AuthDisableResponse)(resp), ContextError(ctx, err) } func (auth *authClient) AuthStatus(ctx context.Context) (*AuthStatusResponse, error) { resp, err := auth.remote.AuthStatus(ctx, &pb.AuthStatusRequest{}, auth.callOpts...) - return (*AuthStatusResponse)(resp), toErr(ctx, err) + return (*AuthStatusResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) + return (*AuthUserAddResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) { resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) + return (*AuthUserAddResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) - return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) + return (*AuthUserDeleteResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) + return (*AuthUserChangePasswordResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) - return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) + return (*AuthUserGrantRoleResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) - return (*AuthUserGetResponse)(resp), toErr(ctx, err) + return (*AuthUserGetResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) - return (*AuthUserListResponse)(resp), toErr(ctx, err) + return (*AuthUserListResponse)(resp), ContextError(ctx, err) } func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) - return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) + return (*AuthUserRevokeRoleResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) 
- return (*AuthRoleAddResponse)(resp), toErr(ctx, err) + return (*AuthRoleAddResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { @@ -204,27 +204,27 @@ func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, ke PermType: authpb.Permission_Type(permType), } resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) - return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) + return (*AuthRoleGrantPermissionResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) - return (*AuthRoleGetResponse)(resp), toErr(ctx, err) + return (*AuthRoleGetResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) - return (*AuthRoleListResponse)(resp), toErr(ctx, err) + return (*AuthRoleListResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) - return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) + return (*AuthRoleRevokePermissionResponse)(resp), ContextError(ctx, err) } func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) - return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) + return (*AuthRoleDeleteResponse)(resp), ContextError(ctx, err) } func StrToPermissionType(s string) (PermissionType, error) { diff --git a/vendor/go.etcd.io/etcd/client/v3/client.go b/vendor/go.etcd.io/etcd/client/v3/client.go index 312d03e7a6..f7aa65a0a7 100644 --- a/vendor/go.etcd.io/etcd/client/v3/client.go +++ b/vendor/go.etcd.io/etcd/client/v3/client.go @@ -148,7 +148,7 @@ func (c *Client) Close() error { c.Lease.Close() } if c.conn != nil { - return toErr(c.ctx, c.conn.Close()) + return ContextError(c.ctx, c.conn.Close()) } return c.ctx.Err() } @@ -573,7 +573,9 @@ func isUnavailableErr(ctx context.Context, err error) bool { return false } -func toErr(ctx context.Context, err error) error { +// ContextError converts the error into an EtcdError if the error message matches one of +// the defined messages; otherwise, it tries to retrieve the context error. +func ContextError(ctx context.Context, err error) error { if err == nil { return nil } diff --git a/vendor/go.etcd.io/etcd/client/v3/cluster.go b/vendor/go.etcd.io/etcd/client/v3/cluster.go index 92d7cdb56b..1815c1c964 100644 --- a/vendor/go.etcd.io/etcd/client/v3/cluster.go +++ b/vendor/go.etcd.io/etcd/client/v3/cluster.go @@ -93,7 +93,7 @@ func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner b } resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) 
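client.go above exports the former toErr helper as ContextError. A hedged usage sketch (not part of the vendored diff); the endpoint and key are placeholder assumptions, and note that the high-level KV/Auth/Cluster methods shown in this diff already apply ContextError before returning:

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	_, err = cli.Get(ctx, "example-key")
	// ContextError folds a gRPC error into the matching rpctypes error, or
	// into the context's own error if the context expired or was canceled.
	switch cerr := clientv3.ContextError(ctx, err); cerr {
	case nil:
		fmt.Println("ok")
	case context.DeadlineExceeded, context.Canceled:
		fmt.Println("context error:", cerr)
	case rpctypes.ErrEmptyKey:
		fmt.Println("rejected by etcd:", cerr)
	default:
		fmt.Println("other error:", cerr)
	}
}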
if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*MemberAddResponse)(resp), nil } @@ -102,7 +102,7 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes r := &pb.MemberRemoveRequest{ID: id} resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*MemberRemoveResponse)(resp), nil } @@ -119,7 +119,7 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin if err == nil { return (*MemberUpdateResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { @@ -128,14 +128,14 @@ func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { if err == nil { return (*MemberListResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { r := &pb.MemberPromoteRequest{ID: id} resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*MemberPromoteResponse)(resp), nil } diff --git a/vendor/go.etcd.io/etcd/client/v3/kv.go b/vendor/go.etcd.io/etcd/client/v3/kv.go index 5e9fb7d458..be5b508dd6 100644 --- a/vendor/go.etcd.io/etcd/client/v3/kv.go +++ b/vendor/go.etcd.io/etcd/client/v3/kv.go @@ -112,23 +112,23 @@ func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { r, err := kv.Do(ctx, OpPut(key, val, opts...)) - return r.put, toErr(ctx, err) + return r.put, ContextError(ctx, err) } func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { r, err := kv.Do(ctx, OpGet(key, opts...)) - return r.get, toErr(ctx, err) + return r.get, ContextError(ctx, err) } func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { r, err := kv.Do(ctx, OpDelete(key, opts...)) - return r.del, toErr(ctx, err) + return r.del, ContextError(ctx, err) } func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) 
if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*CompactResponse)(resp), err } @@ -173,5 +173,5 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { default: panic("Unknown op") } - return OpResponse{}, toErr(ctx, err) + return OpResponse{}, ContextError(ctx, err) } diff --git a/vendor/go.etcd.io/etcd/client/v3/lease.go b/vendor/go.etcd.io/etcd/client/v3/lease.go index 19af9c093a..4e7d1caf83 100644 --- a/vendor/go.etcd.io/etcd/client/v3/lease.go +++ b/vendor/go.etcd.io/etcd/client/v3/lease.go @@ -223,7 +223,7 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err } return gresp, nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { @@ -232,14 +232,14 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, if err == nil { return (*LeaseRevokeResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { r := toLeaseTimeToLiveRequest(id, opts...) resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } gresp := &LeaseTimeToLiveResponse{ ResponseHeader: resp.GetHeader(), @@ -260,7 +260,7 @@ func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { } return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { @@ -315,7 +315,7 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive return resp, err } if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } } } @@ -405,13 +405,13 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKe stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) 
if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } defer func() { if err := stream.CloseSend(); err != nil { if ferr == nil { - ferr = toErr(ctx, err) + ferr = ContextError(ctx, err) } return } @@ -419,12 +419,12 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKe err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } resp, rerr := stream.Recv() if rerr != nil { - return nil, toErr(ctx, rerr) + return nil, ContextError(ctx, rerr) } karesp = &LeaseKeepAliveResponse{ @@ -461,7 +461,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { return err } - if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { + if ContextError(l.stopCtx, err) == rpctypes.ErrNoLeader { l.closeRequireLeader() } break diff --git a/vendor/go.etcd.io/etcd/client/v3/maintenance.go b/vendor/go.etcd.io/etcd/client/v3/maintenance.go index a98b8ca51e..71b28e6dc3 100644 --- a/vendor/go.etcd.io/etcd/client/v3/maintenance.go +++ b/vendor/go.etcd.io/etcd/client/v3/maintenance.go @@ -130,7 +130,7 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { if err == nil { return (*AlarmResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { @@ -143,13 +143,13 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { ar, err := m.AlarmList(ctx) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } ret := AlarmResponse{} for _, am := range ar.Alarms { dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) if derr != nil { - return nil, toErr(ctx, derr) + return nil, ContextError(ctx, derr) } ret.Alarms = append(ret.Alarms, dresp.Alarms...) } @@ -160,18 +160,18 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR if err == nil { return (*AlarmResponse)(resp), nil } - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { remote, cancel, err := m.dial(endpoint) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } defer cancel() resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*DefragmentResponse)(resp), nil } @@ -179,12 +179,12 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { remote, cancel, err := m.dial(endpoint) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } defer cancel() resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*StatusResponse)(resp), nil } @@ -193,12 +193,12 @@ func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (* remote, cancel, err := m.dial(endpoint) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } defer cancel() resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...) 
if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } return (*HashKVResponse)(resp), nil } @@ -206,7 +206,7 @@ func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (* func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) if err != nil { - return nil, toErr(ctx, err) + return nil, ContextError(ctx, err) } m.lg.Info("opened snapshot stream; downloading") @@ -246,10 +246,10 @@ type snapshotReadCloser struct { func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) { n, err = rc.ReadCloser.Read(p) - return n, toErr(rc.ctx, err) + return n, ContextError(rc.ctx, err) } func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) - return (*MoveLeaderResponse)(resp), toErr(ctx, err) + return (*MoveLeaderResponse)(resp), ContextError(ctx, err) } diff --git a/vendor/go.etcd.io/etcd/client/v3/txn.go b/vendor/go.etcd.io/etcd/client/v3/txn.go index 3f6a953cf0..e31bfe0b94 100644 --- a/vendor/go.etcd.io/etcd/client/v3/txn.go +++ b/vendor/go.etcd.io/etcd/client/v3/txn.go @@ -144,7 +144,7 @@ func (txn *txn) Commit() (*TxnResponse, error) { var err error resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) if err != nil { - return nil, toErr(txn.ctx, err) + return nil, ContextError(txn.ctx, err) } return (*TxnResponse)(resp), nil } diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go index 41a6ec9763..725e8a869e 100644 --- a/vendor/go.etcd.io/etcd/client/v3/watch.go +++ b/vendor/go.etcd.io/etcd/client/v3/watch.go @@ -442,7 +442,7 @@ func (w *watchGrpcStream) close() (err error) { case err = <-w.errc: default: } - return toErr(w.ctx, err) + return ContextError(w.ctx, err) } func (w *watcher) closeStream(wgs *watchGrpcStream) { @@ -653,7 +653,7 @@ func (w *watchGrpcStream) run() { // watch client failed on Recv; spawn another if possible case err := <-w.errc: - if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { + if isHaltErr(w.ctx, err) || ContextError(w.ctx, err) == v3rpc.ErrNoLeader { closeErr = err return } @@ -1036,7 +1036,7 @@ func (pr *progressRequest) toPB() *pb.WatchRequest { func streamKeyFromCtx(ctx context.Context) string { if md, ok := metadata.FromOutgoingContext(ctx); ok { - return fmt.Sprintf("%+v", md) + return fmt.Sprintf("%+v", map[string][]string(md)) } return "" } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index 06282ce79c..18436eaedf 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -1,20 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" import ( + "google.golang.org/grpc/stats" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" @@ -31,18 +22,28 @@ const ( GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) -// Filter is a predicate used to determine whether a given request in -// interceptor info should be traced. A Filter must return true if +// InterceptorFilter is a predicate used to determine whether a given request in +// interceptor info should be instrumented. A InterceptorFilter must return true if // the request should be traced. -type Filter func(*InterceptorInfo) bool +// +// Deprecated: Use stats handlers instead. +type InterceptorFilter func(*InterceptorInfo) bool + +// Filter is a predicate used to determine whether a given request in +// should be instrumented by the attached RPC tag info. +// A Filter must return true if the request should be instrumented. +type Filter func(*stats.RPCTagInfo) bool // config is a group of options for this instrumentation. type config struct { - Filter Filter - Propagators propagation.TextMapPropagator - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider - SpanStartOptions []trace.SpanStartOption + Filter Filter + InterceptorFilter InterceptorFilter + Propagators propagation.TextMapPropagator + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + SpanStartOptions []trace.SpanStartOption + SpanAttributes []attribute.KeyValue + MetricAttributes []attribute.KeyValue ReceivedEvent bool SentEvent bool @@ -163,15 +164,30 @@ func (o tracerProviderOption) apply(c *config) { // WithInterceptorFilter returns an Option to use the request filter. // // Deprecated: Use stats handlers instead. -func WithInterceptorFilter(f Filter) Option { +func WithInterceptorFilter(f InterceptorFilter) Option { return interceptorFilterOption{f: f} } type interceptorFilterOption struct { - f Filter + f InterceptorFilter } func (o interceptorFilterOption) apply(c *config) { + if o.f != nil { + c.InterceptorFilter = o.f + } +} + +// WithFilter returns an Option to use the request filter. +func WithFilter(f Filter) Option { + return filterOption{f: f} +} + +type filterOption struct { + f Filter +} + +func (o filterOption) apply(c *config) { if o.f != nil { c.Filter = o.f } @@ -243,3 +259,29 @@ func (o spanStartOption) apply(c *config) { func WithSpanOptions(opts ...trace.SpanStartOption) Option { return spanStartOption{opts} } + +type spanAttributesOption struct{ a []attribute.KeyValue } + +func (o spanAttributesOption) apply(c *config) { + if o.a != nil { + c.SpanAttributes = o.a + } +} + +// WithSpanAttributes returns an Option to add custom attributes to the spans. 
+func WithSpanAttributes(a ...attribute.KeyValue) Option { + return spanAttributesOption{a: a} +} + +type metricAttributesOption struct{ a []attribute.KeyValue } + +func (o metricAttributesOption) apply(c *config) { + if o.a != nil { + c.MetricAttributes = o.a + } +} + +// WithMetricAttributes returns an Option to add custom attributes to the metrics. +func WithMetricAttributes(a ...attribute.KeyValue) Option { + return metricAttributesOption{a: a} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go index 958dcd87a4..b8b836b00f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otelgrpc is the instrumentation library for [google.golang.org/grpc]. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index 3b487a9362..7d5ed05808 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -18,6 +7,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md import ( "context" + "errors" "io" "net" "strconv" @@ -59,7 +49,7 @@ var ( ) // UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable -// for use in a grpc.Dial call. +// for use in a grpc.NewClient call. // // Deprecated: Use [NewClientHandler] instead. 
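With the interceptors deprecated in favor of stats handlers, the new options above (WithFilter on *stats.RPCTagInfo, WithSpanAttributes, WithMetricAttributes) are wired through otelgrpc.NewServerHandler. A hedged sketch (not part of the vendored diff); the filter predicate and attribute keys are illustrative assumptions:

package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/otel/attribute"
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

func newInstrumentedServer() *grpc.Server {
	return grpc.NewServer(
		grpc.StatsHandler(otelgrpc.NewServerHandler(
			// The new Filter receives *stats.RPCTagInfo (not *InterceptorInfo),
			// so it works with stats handlers; here it skips health checks.
			otelgrpc.WithFilter(func(info *stats.RPCTagInfo) bool {
				return info.FullMethodName != "/grpc.health.v1.Health/Check"
			}),
			// New in this version: static attributes on spans and metrics.
			otelgrpc.WithSpanAttributes(attribute.String("service.tier", "backend")),
			otelgrpc.WithMetricAttributes(attribute.String("deployment.zone", "zone-a")),
		)),
	)
}

func main() {
	_ = newInstrumentedServer()
}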
func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { @@ -81,7 +71,7 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { Method: method, Type: UnaryClient, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return invoker(ctx, method, req, reply, cc, callOpts...) } @@ -147,7 +137,7 @@ func (w *clientStream) RecvMsg(m interface{}) error { if err == nil && !w.desc.ServerStreams { w.endSpan(nil) - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { w.endSpan(nil) } else if err != nil { w.endSpan(err) @@ -196,7 +186,7 @@ func (w *clientStream) CloseSend() error { return err } -func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { +func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { return &clientStream{ ClientStream: s, span: span, @@ -219,7 +209,7 @@ func (w *clientStream) endSpan(err error) { } // StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable -// for use in a grpc.Dial call. +// for use in a grpc.NewClient call. // // Deprecated: Use [NewClientHandler] instead. func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { @@ -241,7 +231,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { Method: method, Type: StreamClient, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return streamer(ctx, desc, cc, method, callOpts...) } @@ -270,7 +260,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { span.End() return s, err } - stream := wrapClientStream(ctx, s, desc, span, cfg) + stream := wrapClientStream(s, desc, span, cfg) return stream, nil } } @@ -296,7 +286,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { UnaryServerInfo: info, Type: UnaryServer, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return handler(ctx, req) } @@ -344,7 +334,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) metricAttrs = append(metricAttrs, grpcStatusCodeAttr) - cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) return resp, err } @@ -422,7 +412,7 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { StreamServerInfo: info, Type: StreamServer, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return handler(srv, wrapServerStream(ctx, ss, cfg)) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go index f6116946bf..b62f7cd7c4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
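The doc comments above now reference grpc.NewClient rather than grpc.Dial. A minimal client-side sketch using the recommended stats handler (not part of the vendored diff); the target address and insecure credentials are placeholder assumptions:

package main

import (
	"log"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("dns:///example.internal:4317", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Preferred over the deprecated Unary/StreamClientInterceptor options.
		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
	)
	if err != nil {
		log.Fatalf("creating client: %v", err)
	}
	defer conn.Close()
}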
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go index cf32a9e978..bef07b7a3c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go index f585fb6ae0..3aa37915df 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go index b65fab308f..409c621b74 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 73d2b8b6b2..fbcbfb84e0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -38,6 +27,7 @@ type gRPCContext struct { messagesReceived int64 messagesSent int64 metricAttrs []attribute.KeyValue + record bool } type serverHandler struct { @@ -72,11 +62,15 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) } return context.WithValue(ctx, gRPCContextKey{}, &gctx) } @@ -108,11 +102,15 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) } return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators) @@ -141,6 +139,9 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) if gctx != nil { + if !gctx.record { + return + } metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) metricAttrs = append(metricAttrs, gctx.metricAttrs...) } @@ -150,7 +151,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.InPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.ReceivedEvent { @@ -166,7 +167,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.OutPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.SentEvent { @@ -203,14 +204,17 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool span.End() metricAttrs = append(metricAttrs, rpcStatusAttr) + // Allocate vararg slice once. + recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))} // Use floating point division here for higher precision (instead of Millisecond method). + // Measure right before calling Record() to capture as much elapsed time as possible. elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) - c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) 
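The stats handler above now builds an attribute.Set once and reuses a single options slice for all instruments instead of passing metric.WithAttributes to every Record call. A hedged sketch of the same pattern in ordinary application code (not part of the vendored diff); the meter name, instrument names, and attribute values are assumptions:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// recordOnce mirrors the pattern above: build the attribute set once and reuse
// a single measurement option for every instrument.
func recordOnce(ctx context.Context, meter metric.Meter, elapsedMs float64, responses int64) error {
	duration, err := meter.Float64Histogram("rpc.server.duration", metric.WithUnit("ms"))
	if err != nil {
		return err
	}
	perRPC, err := meter.Int64Histogram("rpc.server.responses_per_rpc")
	if err != nil {
		return err
	}

	// metric.WithAttributeSet skips the sort/deduplicate work that
	// metric.WithAttributes would repeat on every call.
	opt := metric.WithAttributeSet(attribute.NewSet(
		attribute.String("rpc.system", "grpc"),
		attribute.Int("rpc.grpc.status_code", 0),
	))

	duration.Record(ctx, elapsedMs, opt)
	perRPC.Record(ctx, responses, opt)
	return nil
}

func main() {
	_ = recordOnce(context.Background(), otel.Meter("example"), 12.5, 3)
}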
if gctx != nil { - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) + c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) } default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index d633c4bef0..51d28156be 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -1,22 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.49.0" + return "0.55.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index deea149645..6aae83bfd2 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -12,7 +12,7 @@ import ( ) // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. -// Please be careful of intitialization order - for example, if you change +// Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 214acaf581..5d6e6156b7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Server HTTP metrics. -const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - // Client HTTP metrics. 
const ( clientRequestSize = "http.client.request.size" // Outgoing request bytes total diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index c1015a9ecc..a01bfafbe0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -8,6 +8,8 @@ import ( "net/http" "net/http/httptrace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" @@ -33,8 +35,9 @@ type config struct { SpanNameFormatter func(string, *http.Request) string ClientTrace func(context.Context) *httptrace.ClientTrace - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue } // Option interface used for setting optional config properties. @@ -100,7 +103,7 @@ func WithPublicEndpoint() Option { }) } -// WithPublicEndpointFn runs with every request, and allows conditionnally +// WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. @@ -194,3 +197,11 @@ func WithServerName(server string) Option { c.ServerName = server }) } + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. +// These attributes will be included in metrics for every request. 
+func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index c64f8beca7..33580a35b7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -9,10 +9,9 @@ import ( "github.com/felixge/httpsnoop" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -23,7 +22,6 @@ type middleware struct { server string tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption readEvent bool @@ -33,10 +31,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool - traceSemconv semconv.HTTPServer - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram + semconv semconv.HTTPServer } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -55,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { h := middleware{ operation: operation, - - traceSemconv: semconv.NewHTTPServer(), } defaultOpts := []Option{ @@ -66,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han c := newConfig(append(defaultOpts, opts...)...) h.configure(c) - h.createMeasures() return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -77,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han func (h *middleware) configure(c *config) { h.tracer = c.Tracer - h.meter = c.Meter h.propagators = c.Propagators h.spanStartOptions = c.SpanStartOptions h.readEvent = c.ReadEvent @@ -87,6 +78,7 @@ func (h *middleware) configure(c *config) { h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName + h.semconv = semconv.NewHTTPServer(c.Meter) } func handleErr(err error) { @@ -95,30 +87,6 @@ func handleErr(err error) { } } -func (h *middleware) createMeasures() { - var err error - h.requestBytesCounter, err = h.meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - h.responseBytesCounter, err = h.meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - h.serverLatencyMeasure, err = h.meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. 
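A hedged sketch of the new otelhttp.WithMetricAttributesFn option defined above; per its doc comment, the returned attributes are meant to be included in the instrumentation's metrics for every request. The header name and attribute key are placeholder assumptions, not part of the vendored diff:

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	handler := otelhttp.NewHandler(mux, "api-server",
		// Invoked for each request; the returned attributes are added to the
		// metrics recorded for that request (see the option's doc comment above).
		otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
			return []attribute.KeyValue{
				attribute.String("http.request.tenant", r.Header.Get("X-Tenant-ID")), // placeholder key/header
			}
		}),
	)

	_ = http.ListenAndServe(":8080", handler)
}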
func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -133,7 +101,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...), + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), } opts = append(opts, h.spanStartOptions...) @@ -165,14 +133,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, readRecordFunc) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - bw.record = readRecordFunc - r.Body = &bw + r.Body = bw } writeRecordFunc := func(int64) {} @@ -182,13 +148,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - rww := &respWriterWrapper{ - ResponseWriter: w, - record: writeRecordFunc, - ctx: ctx, - props: h.propagators, - statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything - } + rww := request.NewRespWriterWrapper(w, writeRecordFunc) // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -204,41 +164,47 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { return rww.WriteHeader }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, }) - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } next.ServeHTTP(w, r.WithContext(ctx)) - span.SetStatus(semconv.ServerStatus(rww.statusCode)) - span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ - StatusCode: rww.statusCode, - ReadBytes: bw.read.Load(), - ReadError: bw.err, - WriteBytes: rww.written, - WriteError: rww.err, + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), })...) - // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) - if rww.statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) - } - o := metric.WithAttributes(attributes...) - h.requestBytesCounter.Add(ctx, bw.read.Load(), o) - h.responseBytesCounter.Add(ctx, rww.written, o) - // Use floating point division here for higher precision (instead of Millisecond method). 
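serveHTTP above now reuses a Labeler that is already present in the request context instead of always injecting a fresh one, so labels added by application handlers are kept. A small sketch of adding such labels (not part of the vendored diff); the route and attribute key are assumptions:

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/orders", func(w http.ResponseWriter, r *http.Request) {
		// The Labeler was put into the context by the otelhttp middleware;
		// attributes added here end up on this request's metrics.
		labeler, _ := otelhttp.LabelerFromContext(r.Context())
		labeler.Add(attribute.String("app.order.priority", "express")) // placeholder attribute
		w.WriteHeader(http.StatusAccepted)
	})

	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(mux, "orders"))
}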
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.serverLatencyMeasure.Record(ctx, elapsedTime, o) + h.semconv.RecordMetrics(ctx, semconv.MetricData{ + ServerName: h.server, + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + RequestSize: bw.BytesRead(), + ResponseSize: bytesWritten, + ElapsedTime: elapsedTime, + }) } // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { - attr := semconv.NewHTTPServer().Route(route) + attr := semconv.NewHTTPServer(nil).Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go new file mode 100644 index 0000000000..a945f55661 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "io" + "sync" +) + +var _ io.ReadCloser = &BodyWrapper{} + +// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type BodyWrapper struct { + io.ReadCloser + OnRead func(n int64) // must not be nil + + mu sync.Mutex + read int64 + err error +} + +// NewBodyWrapper creates a new BodyWrapper. +// +// The onRead attribute is a callback that will be called every time the data +// is read, with the number of bytes being read. +func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper { + return &BodyWrapper{ + ReadCloser: body, + OnRead: onRead, + } +} + +// Read reads the data from the io.ReadCloser, and stores the number of bytes +// read and the error. +func (w *BodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + + w.updateReadData(n1, err) + w.OnRead(n1) + return n, err +} + +func (w *BodyWrapper) updateReadData(n int64, err error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.read += n + if err != nil { + w.err = err + } +} + +// Closes closes the io.ReadCloser. +func (w *BodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +// BytesRead returns the number of bytes read up to this point. +func (w *BodyWrapper) BytesRead() int64 { + w.mu.Lock() + defer w.mu.Unlock() + + return w.read +} + +// Error returns the last error. 
+func (w *BodyWrapper) Error() error { + w.mu.Lock() + defer w.mu.Unlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go new file mode 100644 index 0000000000..fbc344cbdd --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "net/http" + "sync" +) + +var _ http.ResponseWriter = &RespWriterWrapper{} + +// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc) +// that may be useful when using it in real life situations. +type RespWriterWrapper struct { + http.ResponseWriter + OnWrite func(n int64) // must not be nil + + mu sync.RWMutex + written int64 + statusCode int + err error + wroteHeader bool +} + +// NewRespWriterWrapper creates a new RespWriterWrapper. +// +// The onWrite attribute is a callback that will be called every time the data +// is written, with the number of bytes that were written. +func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper { + return &RespWriterWrapper{ + ResponseWriter: w, + OnWrite: onWrite, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } +} + +// Write writes the bytes array into the [ResponseWriter], and tracks the +// number of bytes written and last error. +func (w *RespWriterWrapper) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } + + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.OnWrite(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *RespWriterWrapper) WriteHeader(statusCode int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(statusCode) +} + +// writeHeader persists the status code for span attribution, and propagates +// the call to the underlying ResponseWriter. +// It does not acquire a lock, and therefore assumes that is being handled by a +// parent method. +func (w *RespWriterWrapper) writeHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} + +// Flush implements [http.Flusher]. +func (w *RespWriterWrapper) Flush() { + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// BytesWritten returns the number of bytes written. 
+func (w *RespWriterWrapper) BytesWritten() int64 { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.written +} + +// BytesWritten returns the HTTP status code that was sent. +func (w *RespWriterWrapper) StatusCode() int { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.statusCode +} + +// Error returns the last error. +func (w *RespWriterWrapper) Error() error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 9be3feef29..9cae4cab86 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -4,11 +4,15 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "context" "fmt" "net/http" + "os" + "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" ) type ResponseTelemetry struct { @@ -19,46 +23,57 @@ type ResponseTelemetry struct { WriteError error } -type HTTPServer interface { - // RequestTraceAttrs returns trace attributes for an HTTP request received by a - // server. - // - // The server must be the primary server name if it is known. For example this - // would be the ServerName directive - // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache - // server, and the server_name directive - // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an - // nginx server. More generically, the primary server name would be the host - // header value that matches the default virtual host of an HTTP server. It - // should include the host identifier and if a port is used to route to the - // server that port identifier should be included as an appropriate port - // suffix. - // - // If the primary server name is not known, server should be an empty string. - // The req Host will be used to determine the server instead. - RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue - - // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. - // - // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. - ResponseTraceAttrs(ResponseTelemetry) []attribute.KeyValue - - // Route returns the attribute for the route. - Route(string) attribute.KeyValue +type HTTPServer struct { + duplicate bool + + // Old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram } -// var warnOnce = sync.Once{} +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. 
+// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) +} -func NewHTTPServer() HTTPServer { - // TODO (#5331): Detect version based on environment variable OTEL_HTTP_CLIENT_COMPATIBILITY_MODE. - // TODO (#5331): Add warning of use of a deprecated version of Semantic Versions. - return oldHTTPServer{} +// Route returns the attribute for the route. +func (s HTTPServer) Route(route string) attribute.KeyValue { + return oldHTTPServer{}.Route(route) } -// ServerStatus returns a span status code and message for an HTTP status code +// Status returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. -func ServerStatus(code int) (codes.Code, string) { +func (s HTTPServer) Status(code int) (codes.Code, string) { if code < 100 || code >= 600 { return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) } @@ -67,3 +82,84 @@ func ServerStatus(code int) (codes.Code, string) { return codes.Unset, "" } + +type MetricData struct { + ServerName string + Req *http.Request + StatusCode int + AdditionalAttributes []attribute.KeyValue + + RequestSize int64 + ResponseSize int64 + ElapsedTime float64 +} + +func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { + if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { + // This will happen if an HTTPServer{} is used instead of NewHTTPServer. + return + } + + attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + + // TODO: Duplicate Metrics +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + duplicate := env == "http/dup" + server := HTTPServer{ + duplicate: duplicate, + } + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + return server +} + +type HTTPClient struct { + duplicate bool +} + +func NewHTTPClient() HTTPClient { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + return HTTPClient{duplicate: env == "http/dup"} +} + +// RequestTraceAttrs returns attributes for an HTTP request made by a client. 
+func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + } + return oldHTTPClient{}.RequestTraceAttrs(req) +} + +// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. +func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + } + + return oldHTTPClient{}.ResponseTraceAttrs(resp) +} + +func (c HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +func (c HTTPClient) ErrorType(err error) attribute.KeyValue { + if c.duplicate { + return newHTTPClient{}.ErrorType(err) + } + + return attribute.KeyValue{} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go new file mode 100644 index 0000000000..745b8c67bc --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -0,0 +1,348 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type newHTTPServer struct{} + +// TraceRequest returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. 
+ count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconvNew.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconvNew.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) + } + } + + if useragent := req.UserAgent(); useragent != "" { + attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconvNew.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + +// TraceResponse returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n newHTTPServer) Route(route string) attribute.KeyValue { + return semconvNew.HTTPRoute(route) +} + +type newHTTPClient struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
+func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. + + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconvNew.URLFull(u)) + + attrs = append(attrs, semconvNew.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconvNew.ErrorTypeOther + } + + return semconvNew.ErrorTypeKey.String(value) +} + +func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index c92076bc3d..e6e14924f5 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -5,8 +5,13 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/ import ( "net" + "net/http" "strconv" "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) // splitHostPort splits a network address hostport of the form "host", @@ -45,5 +50,49 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Byte size checked 16 above. +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index d753083b7b..c999b05e67 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -4,18 +4,21 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "errors" "io" "net/http" + "slices" + "strings" 
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) type oldHTTPServer struct{} -var _ HTTPServer = oldHTTPServer{} - // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. // @@ -45,7 +48,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke if resp.ReadBytes > 0 { attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) } - if resp.ReadError != nil && resp.ReadError != io.EOF { + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { // This is not in the semantic conventions, but is historically provided attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) } @@ -55,7 +58,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke if resp.StatusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) } - if resp.WriteError != nil && resp.WriteError != io.EOF { + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { // This is not in the semantic conventions, but is historically provided attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) } @@ -73,3 +76,117 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue { func HTTPStatusCode(status int) attribute.KeyValue { return semconv.HTTPStatusCode(status) } + +// Server HTTP metrics. +const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + o.methodMetric(req.Method), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} + +func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type oldHTTPClient struct{} + +func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index d5c0093fc4..b80a1db61f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -92,7 +92,7 @@ func (c *netConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } @@ -138,7 +138,7 @@ func (c *netConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } @@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. 
} func netProtocol(proto string) (name string, version string) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index 1548b2db63..ea504e396f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -37,8 +37,12 @@ type labelerContextKeyType int const lablelerContextKey labelerContextKeyType = 0 -func injectLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, lablelerContextKey, l) +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. Only one labeller can be injected into the +// context. Injecting it multiple times will override the previous calls. +func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 8a25e58657..b4119d3438 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,15 +11,16 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/otel/metric" - + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) // Transport implements the http.RoundTripper interface and wraps @@ -27,14 +28,16 @@ import ( type Transport struct { rt http.RoundTripper - tracer trace.Tracer - meter metric.Meter - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - filters []Filter - spanNameFormatter func(string, *http.Request) string - clientTrace func(context.Context) *httptrace.ClientTrace + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + semconv semconv.HTTPClient requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram @@ -54,7 +57,8 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, + rt: base, + semconv: semconv.NewHTTPClient(), } defaultOpts := []Option{ @@ -77,6 +81,7 @@ func (t *Transport) applyConfig(c *config) { t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.metricAttributesFn = c.MetricAttributesFn } func (t *Transport) createMeasures() { @@ 
-137,49 +142,56 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. - // use a body wrapper to determine the request size - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - // noop to prevent nil panic. not using this record fun yet. - bw.record = func(int64) {} - r.Body = &bw + r.Body = bw } - span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) if err != nil { - span.RecordError(err) + // set error type attribute if the error is part of the predefined + // error types. + // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + span.SetStatus(codes.Error, err.Error()) span.End() return res, err } // metrics - metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...) + metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } - o := metric.WithAttributes(metricAttrs...) - t.requestBytesCounter.Add(ctx, bw.read.Load(), o) + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + + t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { t.responseBytesCounter.Add(ctx, n, o) } // traces - span.SetAttributes(semconvutil.HTTPClientResponse(res)...) - span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) res.Body = newWrappedBody(span, readRecordFunc, res.Body) @@ -191,6 +203,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { return res, err } +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest +} + // newWrappedBody returns a new and appropriately scoped *wrappedBody as an // io.ReadCloser. If the passed body implements io.Writer, the returned value // will implement io.ReadWriteCloser. 
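A note on the transport changes above: `RoundTrip` now reuses a `Labeler` found in the request context (via the newly exported `ContextWithLabeler`) instead of always injecting a fresh one, so callers can attach extra metric attributes per request. The sketch below is illustrative only and not part of this diff; the `tenant` attribute and the example.com URL are placeholders.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// The instrumented Transport now picks this Labeler up from the request
	// context; its attributes are added to the request/response size and
	// latency metrics recorded for this round trip.
	labeler := &otelhttp.Labeler{}
	labeler.Add(attribute.String("tenant", "example")) // illustrative attribute

	ctx := otelhttp.ContextWithLabeler(context.Background(), labeler)
	client := http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
```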
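The rewritten `internal/semconv` package earlier in this diff gates the new HTTP semantic-convention attributes behind the `OTEL_SEMCONV_STABILITY_OPT_IN` environment variable: with the value `http/dup`, both the old (v1.20.0) and the new (v1.26.0) attribute sets should be emitted. A hedged sketch of opting in on the client side, assuming the vendored version above; note the variable must be set before the transport is built, because it is read once at construction.

```go
package main

import (
	"log"
	"net/http"
	"os"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Opt in to duplicated semantic conventions. NewTransport reads this
	// variable when it builds its internal semconv helper, so set it first.
	if err := os.Setenv("OTEL_SEMCONV_STABILITY_OPT_IN", "http/dup"); err != nil {
		log.Fatal(err)
	}

	client := http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}
	resp, err := client.Get("https://example.com") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Client spans should now carry both the old attributes (e.g.
	// http.method, http.status_code) and the new ones (e.g.
	// http.request.method, http.response.status_code).
	log.Println("status:", resp.Status)
}
```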
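The new `internal/request` wrappers earlier in this diff (`BodyWrapper`, `RespWriterWrapper`) report byte counts through a callback so the transport and handler can feed their size metrics. Those packages are internal, so the snippet below only sketches the same callback pattern with a standalone type; it is not the library's code.

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// countingReader mirrors the callback-based pattern of request.BodyWrapper:
// every Read reports the number of bytes to an onRead callback and keeps a
// running total that can be queried afterwards.
type countingReader struct {
	io.ReadCloser
	read   atomic.Int64
	onRead func(n int64) // must not be nil
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.ReadCloser.Read(p)
	c.read.Add(int64(n))
	c.onRead(int64(n))
	return n, err
}

func main() {
	body := io.NopCloser(strings.NewReader("hello, world"))
	cr := &countingReader{ReadCloser: body, onRead: func(int64) {}}
	if _, err := io.Copy(io.Discard, cr); err != nil {
		fmt.Println("read error:", err)
	}
	fmt.Println("bytes read:", cr.read.Load())
}
```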
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 22e485dd7d..1133961d39 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.52.0" + return "0.55.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go deleted file mode 100644 index 2f4cc124dc..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - -import ( - "context" - "io" - "net/http" - "sync/atomic" - - "go.opentelemetry.io/otel/propagation" -) - -var _ io.ReadCloser = &bodyWrapper{} - -// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error. -type bodyWrapper struct { - io.ReadCloser - record func(n int64) // must not be nil - - read atomic.Int64 - err error -} - -func (w *bodyWrapper) Read(b []byte) (int, error) { - n, err := w.ReadCloser.Read(b) - n1 := int64(n) - w.read.Add(n1) - w.err = err - w.record(n1) - return n, err -} - -func (w *bodyWrapper) Close() error { - return w.ReadCloser.Close() -} - -var _ http.ResponseWriter = &respWriterWrapper{} - -// respWriterWrapper wraps a http.ResponseWriter in order to track the number of -// bytes written, the last error, and to catch the first written statusCode. -// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional -// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) -// that may be useful when using it in real life situations. -type respWriterWrapper struct { - http.ResponseWriter - record func(n int64) // must not be nil - - // used to inject the header - ctx context.Context - - props propagation.TextMapPropagator - - written int64 - statusCode int - err error - wroteHeader bool -} - -func (w *respWriterWrapper) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *respWriterWrapper) Write(p []byte) (int, error) { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - n, err := w.ResponseWriter.Write(p) - n1 := int64(n) - w.record(n1) - w.written += n1 - w.err = err - return n, err -} - -// WriteHeader persists initial statusCode for span attribution. -// All calls to WriteHeader will be propagated to the underlying ResponseWriter -// and will persist the statusCode from the first call. -// Blocking consecutive calls to WriteHeader alters expected behavior and will -// remove warning logs from net/http where developers will notice incorrect handler implementations. 
-func (w *respWriterWrapper) WriteHeader(statusCode int) { - if !w.wroteHeader { - w.wroteHeader = true - w.statusCode = statusCode - } - w.ResponseWriter.WriteHeader(statusCode) -} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 120b63a9c7..6bf3abc41e 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -5,3 +5,5 @@ collison consequentially ans nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc index 4afbb1fb3b..e2cb3ea944 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellrc +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -5,6 +5,6 @@ check-filenames = check-hidden = ignore-words = .codespellignore interactive = 1 -skip = .git,go.mod,go.sum,semconv,venv,.tools +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools uri-ignore-words-list = * write = diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f56982..0000000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a62511f382..a5f904197f 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,8 +9,11 @@ linters: disable-all: true # Specifically enable linters we want to use. enable: + - asasalint + - bodyclose - depguard - errcheck + - errorlint - godot - gofumpt - goimports @@ -21,8 +24,11 @@ linters: - misspell - revive - staticcheck + - tenv - typecheck + - unconvert - unused + - unparam issues: # Maximum issues count per one linter. @@ -58,12 +64,12 @@ issues: - path: _test\.go linters: - gosec - # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" linters: - gosec - # Igonoring gosec G402: TLS MinVersion too low + # Ignoring gosec G402: TLS MinVersion too low # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - text: "G402: TLS MinVersion too low." linters: @@ -124,6 +130,8 @@ linters-settings: - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/internal$" desc: Do not use cross-module internal packages. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index e5946bfb25..fb107426e7 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,126 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 + +### Added + +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739) +- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. 
(#5773) +- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773) +- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755) + +### Fixed + +- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754) +- Fix panic on instruments creation when setting meter provider. (#5758) +- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780) + +### Removed + +- Drop support for [Go 1.21]. (#5736, #5740, #5800) + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. 
(#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. 
(#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493) +- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497) +- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) +- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545) + +### Fixed + +- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376) +- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434) +- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435) +- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456) +- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464) +- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508) +- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514) +- Fix stale timestamps reported by the last-value aggregation. (#5517) +- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) +- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) + ## [1.27.0/0.49.0/0.3.0] 2024-05-21 ### Added @@ -133,7 +253,7 @@ The next release will require at least [Go 1.21]. 
This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) -- ARM64 platform to the compatibility testing suite. (#4994) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -250,7 +370,7 @@ See our [versioning policy](VERSIONING.md) for more information about these stab ## [1.20.0/0.43.0] 2023-11-10 -This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. ### Added @@ -282,15 +402,15 @@ This release brings a breaking change for custom trace API implementations. Some - `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) - The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. This extends the `TracerProvider` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. This extends the `Tracer` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. This extends the `Span` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. 
(#4620) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) @@ -926,7 +1046,7 @@ The next release will require at least [Go 1.19]. - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) - `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) - Re-enabled Attribute Filters in the Metric SDK. (#3396) -- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408) - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) - Prevent duplicate Prometheus description, unit, and type. (#3469) @@ -1794,7 +1914,7 @@ with major version 0. - Setting error status while recording error with Span from oteltest package. (#1729) - The concept of a remote and local Span stored in a context is unified to just the current Span. Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) @@ -1971,7 +2091,7 @@ with major version 0. - `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) - Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) - Unify endpoint API that related to OTel exporter. (#1401) -- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) +- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435) - Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) - Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) - `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) @@ -2368,7 +2488,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-colector example to use the v0.5.0 collector. (#915) +- Update otel-collector example to use the v0.5.0 collector. (#915) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). 
(#922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. @@ -2961,7 +3081,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.27.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.30.0...HEAD +[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 +[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 [1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 @@ -3043,6 +3166,9 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 88f4c7d0e0..5904bb7070 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,13 +5,13 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 2176ce5261..9158072535 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -578,7 +578,10 @@ See also: The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the -absence of race conditions. +absence of race conditions. The top-level tests with this term will be run +many times in the `test-concurrent-safe` CI job to increase the chance of +catching concurrency issues. This does not apply to subtests when this term +is not in their root name. ### Internal packages @@ -628,16 +631,15 @@ should be canceled. 
### Approvers -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic ### Maintainers -- [David Ashpole](https://github.com/dashpole), Google - [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Tyler Yahn](https://github.com/MrAlias), Splunk ### Emeritus @@ -646,11 +648,12 @@ should be canceled. - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep - [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Evan Torrie](https://github.com/evantorrie), Yahoo ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index a9845a88f6..b04695b242 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -14,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes test-default -ci: generate license-check lint vanity-import-check verify-readmes build test-default check-clean-work-tree test-coverage +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -145,12 +145,14 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short test-verbose: ARGS=-v -race +test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race +test-concurrent-safe: TIMEOUT=120 $(TEST_TARGETS): test test: $(OTEL_GO_MOD_DIRS:%=test/%) test/%: DIR=$* @@ -178,17 +180,14 @@ test-coverage: $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... 
\ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix @@ -264,10 +263,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" @@ -280,16 +276,20 @@ gorelease/%:| $(GORELEASE) && $(GORELEASE) \ || echo "" +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + .PHONY: prerelease -prerelease: $(MULTIMOD) +prerelease: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + $(MULTIMOD) prerelease -m ${MODSET} COMMIT ?= "HEAD" .PHONY: add-tags -add-tags: $(MULTIMOD) +add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown lint-markdown: diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 5a89093173..9a65707038 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -47,20 +47,22 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. -| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Linux | 1.22 | arm64 | -| Linux | 1.21 | arm64 | -| MacOS | 1.22 | amd64 | -| MacOS | 1.21 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. 
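Editor's note: with the Makefile change above, `make benchmark` no longer filters to `sdk/trace`; it now runs `go test -run=xxxxxMatchNothingxxxxx -bench=.` in every module directory (excluding `third_party`), so any standard benchmark is picked up. A sketch under those assumptions (package and function names are hypothetical):

```go
package work_test

import "testing"

// BenchmarkEncode is matched by `-bench=.`, while the
// `-run=xxxxxMatchNothingxxxxx` pattern keeps regular tests from running.
func BenchmarkEncode(b *testing.B) {
	buf := make([]byte, 0, 64)
	for i := 0; i < b.N; i++ {
		buf = append(buf[:0], "example payload"...) // stand-in for the code under measurement
	}
	_ = buf
}
```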
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 940f57f3d8..59992984d4 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). + - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future. - Update all the appropriate links at the bottom. 4. Push the changes to upstream and create a Pull Request on GitHub. diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index f98c54a3cb..36f5367030 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -44,9 +44,15 @@ type Property struct { // NewKeyProperty returns a new Property for key. // +// The passed key must be valid, non-empty UTF-8 string. // If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. func NewKeyProperty(key string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) { // Notice: Consider using [NewKeyValuePropertyRaw] instead // that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) { // NewKeyValuePropertyRaw returns a new Property for key with value. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. 
func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } p := Property{ key: key, @@ -115,12 +134,15 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !validateKey(p.key) { + if !validateBaggageName(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } return nil } @@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) { // String encodes Property into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } @@ -203,9 +233,14 @@ func (p properties) validate() error { // String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } } return strings.Join(props, propertyDelimiter) } @@ -230,6 +265,10 @@ type Member struct { // Notice: Consider using [NewMemberRaw] instead // that does not require percent-encoding of the value. func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) { // NewMemberRaw returns a new Member from the passed arguments. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. +// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key. func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - val := strings.TrimSpace(v) - if !validateValue(val) { + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } // Decode a percent-encoded value. 
- value, err := url.PathUnescape(val) + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err) + return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) return Member{key: key, value: value, properties: props, hasData: true}, nil } +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(cap) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + // validate ensures m conforms to the W3C Baggage specification. // A key must be an ASCII string, returning an error otherwise. func (m Member) validate() error { @@ -314,9 +385,12 @@ func (m Member) validate() error { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !validateKey(m.key) { + if !validateBaggageName(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } return m.properties.validate() } @@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (m Member) String() string { - // A key is just an ASCII string. A value is restricted to be - // US-ASCII characters excluding CTLs, whitespace, - // DQUOTE, comma, semicolon, and backslash. + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { s += propertyDelimiter + m.properties.String() @@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member { } // Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. +// The order of the returned list-members is not significant. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. @@ -469,8 +548,8 @@ func (b Baggage) Members() []Member { return members } -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is // replaced. // // If member is invalid according to the W3C Baggage specification, an error @@ -528,14 +607,22 @@ func (b Baggage) Len() int { // String encodes Baggage into a header string compliant with the W3C Baggage // specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. 
This could happen for a UTF-8 key, as it may contain +// invalid characters. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { - members = append(members, Member{ + s := Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), - }.String()) + }.String() + + // Ignored empty members. + if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) { } // Decode a percent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ '~': true, } +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -735,9 +842,10 @@ func validateKey(s string) bool { } func validateKeyChar(c int32) bool { - return c >= 0 && c <= int32(utf8.RuneSelf) && safeKeyCharset[c] + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value. func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { @@ -850,7 +958,7 @@ var safeValueCharset = [utf8.RuneSelf]bool{ } func validateValueChar(c int32) bool { - return c >= 0 && c <= int32(utf8.RuneSelf) && safeValueCharset[c] + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] } // valueEscape escapes the string so it can be safely placed inside a baggage value, diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index df29d96a6d..2acbac3546 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 441c595014..921f85961a 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. + To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage. 
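Editor's note on the baggage.go hunks above: member and property keys are now validated as non-empty UTF-8 (`validateBaggageName`) rather than as W3C tokens, while `String()` still emits only W3C-compliant list-members and silently drops the rest. A sketch of what that looks like from the public API; the output comments reflect my reading of this diff, not a verified run:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	ascii, _ := baggage.NewMemberRaw("userID", "42")
	utf8Key, err := baggage.NewMemberRaw("clé", "7") // valid UTF-8, but not an RFC 7230 token
	fmt.Println(err)                                 // <nil>: the member is accepted by the API

	bag, _ := baggage.New(ascii, utf8Key)
	fmt.Println(bag.Len())    // 2: both members are stored in the Baggage
	fmt.Println(bag.String()) // "userID=42": the non-token key is omitted from the header encoding
}
```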
*/ diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md new file mode 100644 index 0000000000..50802d5aee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md @@ -0,0 +1,3 @@ +# OTLP Trace Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go index dbb40cf582..3c1a625c06 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go index 9e642235ad..09ad5eadb6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otlptrace contains abstractions for OTLP span exporters. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go index b46a38d60a..3f0a518ae0 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" @@ -104,7 +93,7 @@ func NewUnstarted(client Client) *Exporter { } } -// MarshalLog is the marshaling function used by the logging system to represent this exporter. +// MarshalLog is the marshaling function used by the logging system to represent this Exporter. func (e *Exporter) MarshalLog() interface{} { return struct { Type string diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index ec74f1aad7..4571a5ca39 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index 7aaec38d22..f6dd3decc9 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go index 05a1f78adb..db7b698a56 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index b83cbd7247..81157a71c5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -1,20 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( + "math" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -113,18 +104,29 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { Name: sd.Name(), Attributes: KeyValues(sd.Attributes()), Events: spanEvents(sd.Events()), - DroppedAttributesCount: uint32(sd.DroppedAttributes()), - DroppedEventsCount: uint32(sd.DroppedEvents()), - DroppedLinksCount: uint32(sd.DroppedLinks()), + DroppedAttributesCount: clampUint32(sd.DroppedAttributes()), + DroppedEventsCount: clampUint32(sd.DroppedEvents()), + DroppedLinksCount: clampUint32(sd.DroppedLinks()), } if psid := sd.Parent().SpanID(); psid.IsValid() { s.ParentSpanId = psid[:] } + s.Flags = buildSpanFlags(sd.Parent()) return s } +func clampUint32(v int) uint32 { + if v < 0 { + return 0 + } + if int64(v) > math.MaxUint32 { + return math.MaxUint32 + } + return uint32(v) // nolint: gosec // Overflow/Underflow checked. 
+} + // status transform a span code and message into an OTLP span status. func status(status codes.Code, message string) *tracepb.Status { var c tracepb.Status_StatusCode @@ -157,16 +159,28 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { tid := otLink.SpanContext.TraceID() sid := otLink.SpanContext.SpanID() + flags := buildSpanFlags(otLink.SpanContext) + sl = append(sl, &tracepb.Span_Link{ TraceId: tid[:], SpanId: sid[:], Attributes: KeyValues(otLink.Attributes), - DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), + DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount), + Flags: flags, }) } return sl } +func buildSpanFlags(sc trace.SpanContext) uint32 { + flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK + if sc.IsRemote() { + flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK + } + + return uint32(flags) +} + // spanEvents transforms span Events to an OTLP span events. func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { if len(es) == 0 { @@ -180,7 +194,7 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { Name: es[i].Name, TimeUnixNano: uint64(es[i].Time.UnixNano()), Attributes: KeyValues(es[i].Attributes), - DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), + DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount), } } return events diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md new file mode 100644 index 0000000000..5309bb7cb1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md @@ -0,0 +1,3 @@ +# OTLP Trace gRPC Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index b4cc21d7a3..3993df927d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" @@ -89,11 +78,11 @@ func newClient(opts ...Option) *client { } // Start establishes a gRPC connection to the collector. -func (c *client) Start(ctx context.Context) error { +func (c *client) Start(context.Context) error { if c.conn == nil { // If the caller did not provide a ClientConn when the client was // created, create one using the configuration they did provide. - conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...) 
+ conn, err := grpc.NewClient(c.endpoint, c.dialOpts...) if err != nil { return err } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go index 1f514ef9ea..b7bd429ffd 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otlptracegrpc provides an OTLP span exporter using gRPC. @@ -23,12 +12,11 @@ The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") - target to which the exporter sends telemetry. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. -The value must contain a host. -The value may additionally a port, a scheme, and a path. -The value accepts "http" and "https" scheme. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port, and a path. The value should not contain a query string or fragment. OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. -The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithGRPCConn] options. +The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") - setting "true" disables client transport security for the exporter's gRPC connection. @@ -40,7 +28,7 @@ The configuration can be overridden by [WithInsecure], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) - key-value pairs used as gRPC metadata associated with gRPC requests. -The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format], +The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], except that additional semi-colon delimited metadata is not supported. Example value: "key1=value1,key2=value2". OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. @@ -63,12 +51,12 @@ OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_C The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) - -the filepath to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format. +the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format. 
OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) - -the filepath to the clients private key to use in mTLS communication in PEM format. +the filepath to the client's private key to use in mTLS communication in PEM format. OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go index 89af41002f..b826b84247 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 5530119e4c..4abf48d1f6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" @@ -26,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -174,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. 
+ if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -200,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go index 1fb2906189..97cd6c54f7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go index 32f6dddb4f..7bb189a94b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index dddb1f334d..8d4b4bf089 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -2,24 +2,15 @@ // source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" import ( "crypto/tls" "fmt" + "net/http" + "net/url" "path" "strings" "time" @@ -32,6 +23,7 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + "go.opentelemetry.io/otel/internal/global" ) const ( @@ -44,6 +36,10 @@ const ( ) type ( + // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request. + // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client. + HTTPTransportProxyFunc func(*http.Request) (*url.URL, error) + SignalConfig struct { Endpoint string Insecure bool @@ -55,6 +51,8 @@ type ( // gRPC configurations GRPCCredentials credentials.TransportCredentials + + Proxy HTTPTransportProxyFunc } Config struct { @@ -127,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { @@ -258,6 +256,9 @@ func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { // Generic Options +// WithEndpoint configures the trace host and port only; endpoint should +// resemble "example.com" or "localhost:4317". To configure the scheme and path, +// use WithEndpointURL. func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Endpoint = endpoint @@ -265,6 +266,26 @@ func WithEndpoint(endpoint string) GenericOption { }) } +// WithEndpointURL configures the trace scheme, host, port, and path; the +// provided value should resemble "https://example.com:4318/v1/traces". 
+func WithEndpointURL(v string) GenericOption { + return newGenericOption(func(cfg Config) Config { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "otlptrace: parse endpoint url", "url", v) + return cfg + } + + cfg.Traces.Endpoint = u.Host + cfg.Traces.URLPath = u.Path + if u.Scheme != "https" { + cfg.Traces.Insecure = true + } + + return cfg + }) +} + func WithCompression(compression Compression) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Compression = compression @@ -323,3 +344,10 @@ func WithTimeout(duration time.Duration) GenericOption { return cfg }) } + +func WithProxy(pf HTTPTransportProxyFunc) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Proxy = pf + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go index d9dcdc96e7..3d4f699d47 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go index 19b6d4b21f..38b97a0131 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go index 076905e54b..a12ea4c48e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 3ce7d6632b..1c5450ab62 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package retry provides request retry functionality that can perform // configurable exponential backoff for transient errors and honor any @@ -123,7 +112,7 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { - return fmt.Errorf("%w: %s", ctxErr, err) + return fmt.Errorf("%w: %w", ctxErr, err) } } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index 17ffeaf6ef..00ab1f20c6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" @@ -64,14 +53,50 @@ func WithInsecure() Option { return wrappedOption{otlpconfig.WithInsecure()} } -// WithEndpoint sets the target endpoint the exporter will connect to. If -// unset, localhost:4317 will be used as a default. +// WithEndpoint sets the target endpoint (host and port) the Exporter will +// connect to. The provided endpoint should resemble "example.com:4317" (no +// scheme or path). +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. +// +// If both this option and WithEndpointURL are used, the last used option will +// take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, "localhost:4317" will be used. // // This option has no effect if WithGRPCConn is used. func WithEndpoint(endpoint string) Option { return wrappedOption{otlpconfig.WithEndpoint(endpoint)} } +// WithEndpointURL sets the target endpoint URL (scheme, host, port, path) +// the Exporter will connect to. The provided endpoint URL should resemble +// "https://example.com:4318/v1/traces". +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. +// +// If both this option and WithEndpoint are used, the last used option will +// take precedence. +// +// If an invalid URL is provided, the default value will be kept. +// +// By default, if an environment variable is not set, and this option is not +// passed, "https://localhost:4317/v1/traces" will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpointURL(u string) Option { + return wrappedOption{otlpconfig.WithEndpointURL(u)} +} + // WithReconnectionPeriod set the minimum amount of time between connection // attempts to the target endpoint. // @@ -131,6 +156,8 @@ func WithServiceConfig(serviceConfig string) Option { // connection. The options here are appended to the internal grpc.DialOptions // used so they will take precedence over any other internal grpc.DialOptions // they might conflict with. +// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError] +// grpc.DialOptions are ignored. // // This option has no effect if WithGRPCConn is used. 
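Editor's note: taken together with the `WithEndpointURL` plumbing earlier in this diff, the gRPC exporter can now be given a full URL, with the option deriving host, path, and insecure-vs-TLS from the scheme. A sketch of the two option styles; the collector addresses are placeholders:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()

	// Host and port only; scheme and path are not part of WithEndpoint.
	expA, err := otlptracegrpc.New(ctx, otlptracegrpc.WithEndpoint("collector.example.com:4317"))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = expA.Shutdown(ctx) }()

	// Full URL; a non-"https" scheme also switches the connection to insecure.
	expB, err := otlptracegrpc.New(ctx, otlptracegrpc.WithEndpointURL("http://collector.example.com:4317"))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = expB.Shutdown(ctx) }()
}
```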
func WithDialOption(opts ...grpc.DialOption) Option { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 3b3bc35911..ab473f01f7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -1,20 +1,9 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.22.0" + return "1.30.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index f32766e57f..822d847947 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -14,33 +14,33 @@ import ( // BoolSliceValue converts a bool slice into an array with same elements as slice. func BoolSliceValue(v []bool) interface{} { var zero bool - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Int64SliceValue converts an int64 slice into an array with same elements as slice. func Int64SliceValue(v []int64) interface{} { var zero int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Float64SliceValue converts a float64 slice into an array with same elements as slice. func Float64SliceValue(v []float64) interface{} { var zero float64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // StringSliceValue converts a string slice into an array with same elements as slice. func StringSliceValue(v []string) interface{} { var zero string - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // AsBoolSlice converts a bool array into a slice into with same elements as array. 
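The slice converters above now build the backing array with reflect.New(...).Elem() and fill it with reflect.Copy, instead of slicing the new array and type-asserting. A self-contained sketch of that pattern, using a hypothetical sliceToArray helper:

```go
package main

import (
	"fmt"
	"reflect"
)

// sliceToArray mirrors the updated BoolSliceValue: allocate a [len(v)]bool
// via reflect, then fill it with reflect.Copy, which accepts an addressable
// array value as its destination.
func sliceToArray(v []bool) interface{} {
	var zero bool
	arr := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
	reflect.Copy(arr, reflect.ValueOf(v))
	return arr.Interface()
}

func main() {
	out := sliceToArray([]bool{true, false, true})
	fmt.Printf("%T %v\n", out, out) // [3]bool [true false true]
}
```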
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index 590fa7385f..f2fc3929b1 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,8 +5,8 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "reflect" "sync" - "sync/atomic" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" @@ -65,6 +65,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.meters == nil { @@ -75,7 +76,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me return val } - t := &meter{name: name, opts: opts} + t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} p.meters[key] = t return t } @@ -91,17 +92,29 @@ type meter struct { opts []metric.MeterOption mtx sync.Mutex - instruments []delegatedInstrument + instruments map[instID]delegatedInstrument registry list.List - delegate atomic.Value // metric.Meter + delegate metric.Meter } type delegatedInstrument interface { setDelegate(metric.Meter) } +// instID are the identifying properties of a instrument. +type instID struct { + // name is the name of the stream. + name string + // description is the description of the stream. + description string + // kind defines the functional group of the instrument. + kind reflect.Type + // unit is the unit of the stream. + unit string +} + // setDelegate configures m to delegate all Meter functionality to Meters // created by provider. // @@ -109,12 +122,12 @@ type delegatedInstrument interface { // // It is guaranteed by the caller that this happens only once. func (m *meter) setDelegate(provider metric.MeterProvider) { - meter := provider.Meter(m.name, m.opts...) - m.delegate.Store(meter) - m.mtx.Lock() defer m.mtx.Unlock() + meter := provider.Meter(m.name, m.opts...) + m.delegate = meter + for _, inst := range m.instruments { inst.setDelegate(meter) } @@ -132,169 +145,295 @@ func (m *meter) setDelegate(provider metric.MeterProvider) { } func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Counter(name, options...) + } + i := &siCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewInt64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64UpDownCounter(name, options...) + } + i := &siUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewInt64UpDownCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Histogram(name, options...) + } + i := &siHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewInt64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Gauge(name, options...) + } + i := &siGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewInt64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableCounter(name, options...) + } + i := &aiCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableUpDownCounter(name, options...) + } + i := &aiUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableGauge(name, options...) + } + i := &aiGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewInt64ObservableGaugeConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Counter(name, options...) + } + i := &sfCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewFloat64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64UpDownCounter(name, options...) + } + i := &sfUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewFloat64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Histogram(name, options...) + } + i := &sfHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewFloat64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Gauge(name, options...) + } + i := &sfGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewFloat64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableCounter(name, options...) + } + i := &afCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewFloat64ObservableCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableUpDownCounter(name, options...) + } + i := &afUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableGauge(name, options...) + } + i := &afGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i return i, nil } // RegisterCallback captures the function that will be called during Collect. func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - insts = unwrapInstruments(insts) - return del.RegisterCallback(f, insts...) - } - m.mtx.Lock() defer m.mtx.Unlock() + if m.delegate != nil { + insts = unwrapInstruments(insts) + return m.delegate.RegisterCallback(f, insts...) + } + reg := ®istration{instruments: insts, function: f} e := m.registry.PushBack(reg) reg.unreg = func() error { diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 596f716f40..e31f442b48 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -86,6 +86,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.tracers == nil { @@ -101,10 +102,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct { - name string - version string -} +type il struct{ name, version, schema string } // tracer is a placeholder for a trace.Tracer. // diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 3e7bb3b356..9b1da2c02b 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -24,7 +24,8 @@ func Int64ToRaw(i int64) uint64 { } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). 
+ return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -36,9 +37,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index cf23db7780..f8435d8f28 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -213,7 +213,7 @@ type Float64Observer interface { } // Float64Callback is a function registered with a Meter that makes -// observations for a Float64Observerable instrument it is registered with. +// observations for a Float64Observable instrument it is registered with. // Calls to the Float64Observer record measurement values for the // Float64Observable. // diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index c82ba5324e..e079aaef16 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -212,7 +212,7 @@ type Int64Observer interface { } // Int64Callback is a function registered with a Meter that makes observations -// for an Int64Observerable instrument it is registered with. Calls to the +// for an Int64Observable instrument it is registered with. Calls to the // Int64Observer record measurement values for the Int64Observable. // // The function needs to complete in a finite amount of time and the deadline diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index 075234b338..f153745b00 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -57,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and See the [OpenTelemetry documentation] for more information about instruments and their intended use. +# Instrument Name + +OpenTelemetry defines an [instrument name syntax] that restricts what +instrument names are allowed. + +Instrument names should ... + + - Not be empty. + - Have an alphabetic character as their first letter. + - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’, + ‘-’, or ‘/’. + - Have a maximum length of 255 letters. + +To ensure compatibility with observability platforms, all instruments created +need to conform to this syntax. Not all implementations of the API will validate +these names, it is the callers responsibility to ensure compliance. + # Measurements Measurements are made by recording values and information about the values with @@ -153,6 +170,7 @@ It is strongly recommended that authors only embed That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API. 
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax [OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ [GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider */ diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 460b3f9b08..14e08c24a4 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -47,21 +47,41 @@ type Meter interface { // Int64Counter returns a new Int64Counter instrument identified by name // and configured with options. The instrument is used to synchronously // record increasing int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -71,7 +91,12 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. 
The // instrument is used to asynchronously record int64 measurements once per @@ -81,7 +106,12 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -91,27 +121,51 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by // name and configured with options. The instrument is used to // synchronously record increasing float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. 
The // instrument is used to asynchronously record increasing float64 @@ -121,7 +175,12 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -131,7 +190,12 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -141,6 +205,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a @@ -186,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. 
ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 8c5ac55ca9..4d36b98cf4 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -19,6 +19,10 @@ "matchManagers": ["gomod"], "matchDepTypes": ["indirect"], "enabled": false + }, + { + "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], + "groupName": "googleapis" } ] } diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index e0a43e1384..ab09daf9d5 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.6 +codespell==2.3.0 diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go index f4d1857c4f..f2cdf3c651 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -4,5 +4,6 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Library represents the instrumentation library. -// Deprecated: please use Scope instead. +// +// Deprecated: use [Scope] instead. type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go deleted file mode 100644 index 1fc19d3fe3..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/sdk/internal" - -//go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go -//go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go -//go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go - -//go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go -//go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go -//go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go -//go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go -//go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go 
b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go deleted file mode 100644 index a990092f9d..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/sdk/internal" - -import "time" - -// MonotonicEndTime returns the end time at present -// but offset from start, monotonically. -// -// The monotonic clock is used in subtractions hence -// the duration since start added back to start gives -// end as a monotonic time. -// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks -func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Since(start)) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md new file mode 100644 index 0000000000..fab61647c2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md @@ -0,0 +1,46 @@ +# Experimental Features + +The SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These feature may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Resource](#resource) + +### Resource + +[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental. +To have experimental semantic conventions be added by [resource detectors] set the `OTEL_GO_X_RESOURCE` environment variable. +The value set must be the case-insensitive string of `"true"` to enable the feature. +All other values are ignored. + + + +[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/ +[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector + +#### Examples + +Enable experimental resource semantic conventions. + +```console +export OTEL_GO_X_RESOURCE=true +``` + +Disable experimental resource semantic conventions. + +```console +unset OTEL_GO_X_RESOURCE +``` + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go new file mode 100644 index 0000000000..68d296cbed --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x contains support for OTel SDK experimental features. +// +// This package should only be used for features defined in the specification. 
+// It should not be used for experiments or new project ideas. +package x // import "go.opentelemetry.io/otel/sdk/internal/x" + +import ( + "os" + "strings" +) + +// Resource is an experimental feature flag that defines if resource detectors +// should be included experimental semantic conventions. +// +// To enable this feature set the OTEL_GO_X_RESOURCE environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Resource = newFeature("RESOURCE", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 50d2df5eb4..6ac1cdbf7b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -9,9 +9,11 @@ import ( "os" "path/filepath" + "github.com/google/uuid" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type ( @@ -36,6 +38,8 @@ type ( } defaultServiceNameDetector struct{} + + defaultServiceInstanceIDDetector struct{} ) var ( @@ -43,6 +47,7 @@ var ( _ Detector = host{} _ Detector = stringDetector{} _ Detector = defaultServiceNameDetector{} + _ Detector = defaultServiceInstanceIDDetector{} ) // Detect returns a *Resource that describes the OpenTelemetry SDK used. @@ -95,3 +100,19 @@ func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) }, ).Detect(ctx) } + +// Detect implements Detector. 
+func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceInstanceIDKey, + func() (string, error) { + version4Uuid, err := uuid.NewRandom() + if err != nil { + return "", err + } + + return version4Uuid.String(), nil + }, + ).Detect(ctx) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index 7525ee75f0..5ecd859a52 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type containerIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 0d5a355ab9..813f056242 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 3c1aa6285b..2d0f65498a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type hostIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 71386e2da4..3677c83d7d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -10,17 +10,16 @@ import ( "golang.org/x/sys/windows/registry" ) -// implements hostIDReader +// implements hostIDReader. type hostIDReaderWindows struct{} -// read reads MachineGuid from the windows registry key: -// SOFTWARE\Microsoft\Cryptography +// read reads MachineGuid from the Windows registry key: +// SOFTWARE\Microsoft\Cryptography. 
func (*hostIDReaderWindows) read() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, registry.QUERY_VALUE|registry.WOW64_64KEY, ) - if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index ff78020fa1..8a48ab4fa3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type osDescriptionProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go index 5e3d199d78..a6a5a53c0e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -17,7 +17,6 @@ import ( func platformOSDescription() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) - if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index e4e1df8c98..085fe68fd7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 9f1af3a236..ad4b50df40 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" ) // Resource describes an entity about which identifying information @@ -218,11 +219,17 @@ func Empty() *Resource { func Default() *Resource { defaultResourceOnce.Do(func() { var err error - defaultResource, err = Detect( - context.Background(), + defaultDetectors := []Detector{ defaultServiceNameDetector{}, fromEnv{}, telemetrySDK{}, + } + if x.Resource.Enabled() { + defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) 
+ } + defaultResource, err = Detect( + context.Background(), + defaultDetectors..., ) if err != nil { otel.Handle(err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 8a89fffdb4..1d399a75db 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -381,7 +381,7 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R } } -func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { +func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go index 69eb2fdfce..821c83faa1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -3,23 +3,43 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" +import ( + "slices" + "sync" + + "go.opentelemetry.io/otel/internal/global" +) + // evictedQueue is a FIFO queue with a configurable capacity. -type evictedQueue struct { - queue []interface{} +type evictedQueue[T any] struct { + queue []T capacity int droppedCount int + logDropped func() } -func newEvictedQueue(capacity int) evictedQueue { +func newEvictedQueueEvent(capacity int) evictedQueue[Event] { // Do not pre-allocate queue, do this lazily. - return evictedQueue{capacity: capacity} + return evictedQueue[Event]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }), + } +} + +func newEvictedQueueLink(capacity int) evictedQueue[Link] { + // Do not pre-allocate queue, do this lazily. + return evictedQueue[Link]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }), + } } // add adds value to the evictedQueue eq. If eq is at capacity, the oldest // queued value will be discarded and the drop count incremented. -func (eq *evictedQueue) add(value interface{}) { +func (eq *evictedQueue[T]) add(value T) { if eq.capacity == 0 { eq.droppedCount++ + eq.logDropped() return } @@ -28,6 +48,12 @@ func (eq *evictedQueue) add(value interface{}) { copy(eq.queue[:eq.capacity-1], eq.queue[1:]) eq.queue = eq.queue[:eq.capacity-1] eq.droppedCount++ + eq.logDropped() } eq.queue = append(eq.queue, value) } + +// copy returns a copy of the evictedQueue. +func (eq *evictedQueue[T]) copy() []T { + return slices.Clone(eq.queue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index f9633d8c57..925bcf9930 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -41,7 +41,12 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace gen.Lock() defer gen.Unlock() sid := trace.SpanID{} - _, _ = gen.randSource.Read(sid[:]) + for { + _, _ = gen.randSource.Read(sid[:]) + if sid.IsValid() { + break + } + } return sid } @@ -51,9 +56,19 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace. 
gen.Lock() defer gen.Unlock() tid := trace.TraceID{} - _, _ = gen.randSource.Read(tid[:]) sid := trace.SpanID{} - _, _ = gen.randSource.Read(sid[:]) + for { + _, _ = gen.randSource.Read(tid[:]) + if tid.IsValid() { + break + } + } + for { + _, _ = gen.randSource.Read(sid[:]) + if sid.IsValid() { + break + } + } return tid, sid } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index dec237ca73..14c2e5bebd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -291,7 +291,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { retErr = err } else { // Poor man's list of errors - retErr = fmt.Errorf("%v; %v", retErr, err) + retErr = fmt.Errorf("%w; %w", retErr, err) } } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index 32f862790c..d511d0f271 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -99,7 +99,7 @@ func (s snapshot) InstrumentationScope() instrumentation.Scope { // InstrumentationLibrary returns information about the instrumentation // library that created the span. -func (s snapshot) InstrumentationLibrary() instrumentation.Library { +func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility return s.instrumentationScope } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index f0221eaa85..4945f50830 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -17,10 +17,10 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/internal" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -62,7 +62,7 @@ type ReadOnlySpan interface { // InstrumentationLibrary returns information about the instrumentation // library that created the span. // Deprecated: please use InstrumentationScope instead. - InstrumentationLibrary() instrumentation.Library + InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility // Resource returns information about the entity that produced the span. Resource() *resource.Resource // DroppedAttributes returns the number of attributes dropped by the span @@ -137,12 +137,13 @@ type recordingSpan struct { // ReadOnlySpan exported when the span ends. attributes []attribute.KeyValue droppedAttributes int + logDropAttrsOnce sync.Once // events are stored in FIFO queue capped by configured limit. - events evictedQueue + events evictedQueue[Event] // links are stored in FIFO queue capped by configured limit. - links evictedQueue + links evictedQueue[Link] // executionTracerTaskEnd ends the execution tracer span. executionTracerTaskEnd func() @@ -219,7 +220,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { limit := s.tracer.provider.spanLimits.AttributeCountLimit if limit == 0 { // No attributes allowed. 
- s.droppedAttributes += len(attributes) + s.addDroppedAttr(len(attributes)) return } @@ -236,7 +237,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { for _, a := range attributes { if !a.Valid() { // Drop all invalid attributes. - s.droppedAttributes++ + s.addDroppedAttr(1) continue } a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) @@ -244,6 +245,22 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { } } +// Declared as a var so tests can override. +var logDropAttrs = func() { + global.Warn("limit reached: dropping trace Span attributes") +} + +// addDroppedAttr adds incr to the count of dropped attributes. +// +// The first, and only the first, time this method is called a warning will be +// logged. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) addDroppedAttr(incr int) { + s.droppedAttributes += incr + s.logDropAttrsOnce.Do(logDropAttrs) +} + // addOverCapAttrs adds the attributes attrs to the span s while // de-duplicating the attributes of s and attrs and dropping attributes that // exceed the limit. @@ -273,7 +290,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { for _, a := range attrs { if !a.Valid() { // Drop all invalid attributes. - s.droppedAttributes++ + s.addDroppedAttr(1) continue } @@ -286,7 +303,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { if len(s.attributes) >= limit { // Do not just drop all of the remaining attributes, make sure // updates are checked and performed. - s.droppedAttributes++ + s.addDroppedAttr(1) } else { a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes = append(s.attributes, a) @@ -367,7 +384,7 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { // Store the end time as soon as possible to avoid artificially increasing // the span's duration in case some operation below takes a while. - et := internal.MonotonicEndTime(s.startTime) + et := monotonicEndTime(s.startTime) // Do relative expensive check now that we have an end time and see if we // need to do any more processing. @@ -418,6 +435,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } } +// monotonicEndTime returns the end time at present but offset from start, +// monotonically. +// +// The monotonic clock is used in subtractions hence the duration since start +// added back to start gives end as a monotonic time. See +// https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func monotonicEndTime(start time.Time) time.Time { + return start.Add(time.Since(start)) +} + // RecordError will record err as a span event for this span. An additional call to // SetStatus is required if the Status of the Span should be set to Error, this method // does not change the Span status. If this span is not being recorded or err is nil @@ -585,7 +612,7 @@ func (s *recordingSpan) Links() []Link { if len(s.links.queue) == 0 { return []Link{} } - return s.interfaceArrayToLinksArray() + return s.links.copy() } // Events returns the events of this span. @@ -595,7 +622,7 @@ func (s *recordingSpan) Events() []Event { if len(s.events.queue) == 0 { return []Event{} } - return s.interfaceArrayToEventArray() + return s.events.copy() } // Status returns the status of this span. 
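addDroppedAttr above pairs the drop counter with a sync.Once so the "limit reached" warning is emitted only on the first drop. A stripped-down sketch of that warn-once pattern; the dropCounter type is illustrative, not part of the SDK:

```go
package main

import (
	"fmt"
	"sync"
)

// dropCounter is an illustrative stand-in for the span's dropped-attribute
// bookkeeping: every drop is counted, but the warning fires only once.
type dropCounter struct {
	dropped int
	once    sync.Once
}

func (d *dropCounter) add(n int) {
	d.dropped += n
	d.once.Do(func() { fmt.Println("limit reached: dropping attributes") })
}

func main() {
	var d dropCounter
	d.add(3)
	d.add(2) // counted, but no second warning
	fmt.Println("dropped:", d.dropped)
}
```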
@@ -615,7 +642,7 @@ func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { // InstrumentationLibrary returns the instrumentation.Library associated with // the Tracer that created this span. -func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { +func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility s.mu.Lock() defer s.mu.Unlock() return s.tracer.instrumentationScope @@ -717,32 +744,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan { } sd.droppedAttributeCount = s.droppedAttributes if len(s.events.queue) > 0 { - sd.events = s.interfaceArrayToEventArray() + sd.events = s.events.copy() sd.droppedEventCount = s.events.droppedCount } if len(s.links.queue) > 0 { - sd.links = s.interfaceArrayToLinksArray() + sd.links = s.links.copy() sd.droppedLinkCount = s.links.droppedCount } return &sd } -func (s *recordingSpan) interfaceArrayToLinksArray() []Link { - linkArr := make([]Link, 0) - for _, value := range s.links.queue { - linkArr = append(linkArr, value.(Link)) - } - return linkArr -} - -func (s *recordingSpan) interfaceArrayToEventArray() []Event { - eventArr := make([]Event, 0) - for _, value := range s.events.queue { - eventArr = append(eventArr, value.(Event)) - } - return eventArr -} - func (s *recordingSpan) addChild() { if !s.IsRecording() { return diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 3668b1387d..43419d3b54 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -132,8 +132,8 @@ func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr Sa spanKind: trace.ValidateSpanKind(config.SpanKind()), name: name, startTime: startTime, - events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit), - links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit), + events: newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit), + links: newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit), tracer: tr, } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index f0d8fc51a2..afa4baae80 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.27.0" + return "1.30.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go index ada857995d..d5197e16ce 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -115,7 +115,7 @@ func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { name = hostPart } if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { - port = int(parsedPort) + port = int(parsedPort) // nolint: gosec // Bit size of 16 checked above. 
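The removed interfaceArrayToLinksArray/interfaceArrayToEventArray helpers are superseded by the generic evictedQueue[T] introduced earlier in this diff, whose copy method returns a slices.Clone of the queue. A reduced model of that queue, with the drop logging omitted:

```go
package main

import (
	"fmt"
	"slices"
)

// evicted is a reduced model of the SDK's evictedQueue[T]: a FIFO capped at
// capacity that counts everything it discards.
type evicted[T any] struct {
	queue    []T
	capacity int
	dropped  int
}

func (e *evicted[T]) add(v T) {
	if e.capacity == 0 {
		e.dropped++
		return
	}
	if len(e.queue) == e.capacity {
		// At capacity: shift out the oldest entry and count the drop.
		copy(e.queue[:e.capacity-1], e.queue[1:])
		e.queue = e.queue[:e.capacity-1]
		e.dropped++
	}
	e.queue = append(e.queue, v)
}

// snapshot returns an independent copy, as Events()/Links() now do via copy().
func (e *evicted[T]) snapshot() []T { return slices.Clone(e.queue) }

func main() {
	q := evicted[string]{capacity: 2}
	q.add("a")
	q.add("b")
	q.add("c") // evicts "a"
	fmt.Println(q.snapshot(), "dropped:", q.dropped) // [b c] dropped: 1
}
```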
} return } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/README.md deleted file mode 100644 index 980fcc7df5..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.25.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.25.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.25.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/event.go deleted file mode 100644 index 4f0ccb4406..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/event.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" - -import "go.opentelemetry.io/otel/attribute" - -// This event represents an occurrence of a lifecycle transition on the iOS -// platform. -const ( - // IosStateKey is the attribute Key conforming to the "ios.state" semantic - // conventions. It represents the this attribute represents the state the - // application has transitioned into at the occurrence of the event. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate - // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902), - // and from which the `OS terminology` column values are derived. - IosStateKey = attribute.Key("ios.state") -) - -var ( - // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive` - IosStateActive = IosStateKey.String("active") - // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive` - IosStateInactive = IosStateKey.String("inactive") - // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground` - IosStateBackground = IosStateKey.String("background") - // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground` - IosStateForeground = IosStateKey.String("foreground") - // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate` - IosStateTerminate = IosStateKey.String("terminate") -) - -// This event represents an occurrence of a lifecycle transition on the Android -// platform. -const ( - // AndroidStateKey is the attribute Key conforming to the "android.state" - // semantic conventions. It represents the this attribute represents the - // state the application has transitioned into at the occurrence of the - // event. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The Android lifecycle states are defined in [Activity lifecycle - // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), - // and from which the `OS identifiers` are derived. 
- AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// RPC received/sent message. -const ( - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // MessageIDKey is the attribute Key conforming to the "message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents the whether this is a received or - // sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessageTypeKey = attribute.Key("message.type") - - // MessageUncompressedSizeKey is the attribute Key conforming to the - // "message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) - -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. -func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - -// MessageID returns an attribute KeyValue conforming to the "message.id" -// semantic conventions. It represents the mUST be calculated as two different -// counters starting from `1` one for sent messages and one for received -// message. -func MessageID(val int) attribute.KeyValue { - return MessageIDKey.Int(val) -} - -// MessageUncompressedSize returns an attribute KeyValue conforming to the -// "message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. 
-func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/resource.go deleted file mode 100644 index 13affea0dd..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/resource.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" - -import "go.opentelemetry.io/otel/attribute" - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID - // MUST be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is - // populated.) - // Stability: experimental - // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', - // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a - // running [ECS - // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', - // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family - // name of the [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) - // used to create the ECS task. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for the task definition used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSTaskID returns an attribute KeyValue conforming to the -// "aws.ecs.task.id" semantic conventions. It represents the ID of a running -// ECS task. The ID MUST be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running -// [ECS -// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the family name of -// the [ECS task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) -// used to create the ECS task. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. 
-func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. 
It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Heroku dyno metadata -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry -// Scope's concepts. -const ( - // OTelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. It represents the none - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelLibraryNameKey = attribute.Key("otel.library.name") - - // OTelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. It represents the none - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0.0' - OTelLibraryVersionKey = attribute.Key("otel.library.version") -) - -// OTelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. 
It represents the none -func OTelLibraryName(val string) attribute.KeyValue { - return OTelLibraryNameKey.String(val) -} - -// OTelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. It represents the none -func OTelLibraryVersion(val string) attribute.KeyValue { - return OTelLibraryVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/trace.go deleted file mode 100644 index 5c5ede223f..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/trace.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" - -import "go.opentelemetry.io/otel/attribute" - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span. 
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// The `aws` conventions apply to operations using the AWS SDK. They map -// request or response parameters in AWS SDK API calls to attributes on a Span. -// The conventions have been collected over time based on feedback from AWS -// users of tracing and will continue to evolve as new interesting conventions -// are found. -// Some descriptions are also provided for populating general OpenTelemetry -// semantic conventions based on these APIs. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for S3 request types. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") - - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md new file mode 100644 index 0000000000..2de1fc3c6b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.26.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go similarity index 81% rename from vendor/go.opentelemetry.io/otel/semconv/v1.25.0/attribute_group.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go index 30a51fa701..d8dc822b26 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/attribute_group.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go @@ -3,190 +3,33 @@ // Code generated from semantic convention specification. DO NOT EDIT. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" import "go.opentelemetry.io/otel/attribute" -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the identifies the class / type of - // event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'browser.mouse.click', 'device.app.lifecycle' - // Note: Event names are subject to the same rules as [attribute - // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.31.0/specification/common/attribute-naming.md). - // Notably, event names are namespaced to avoid collisions and provide a - // clean separation of semantics for events in separate domains like - // browser, mobile, and kubernetes. 
- EventNameKey = attribute.Key("event.name") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the identifies the class / type of -// event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// The attributes described in this section are rather generic. They may be -// used in any Log Record they apply to. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Describes Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// A file to which log was emitted. +// The Android platform on which the Android application is running. const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It represents the uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the android operating system. 
More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. -func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// Describes Database attributes -const ( - // PoolNameKey is the attribute Key conforming to the "pool.name" semantic - // conventions. It represents the name of the connection pool; unique - // within the instrumented application. In case the connection pool - // implementation doesn't provide a name, instrumentation should use a - // combination of `server.address` and `server.port` attributes formatted - // as `server.address:server.port`. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myDataSource' - PoolNameKey = attribute.Key("pool.name") - - // StateKey is the attribute Key conforming to the "state" semantic - // conventions. It represents the state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'idle' - StateKey = attribute.Key("state") -) - -var ( - // idle - StateIdle = StateKey.String("idle") - // used - StateUsed = StateKey.String("used") + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") ) -// PoolName returns an attribute KeyValue conforming to the "pool.name" -// semantic conventions. It represents the name of the connection pool; unique -// within the instrumented application. In case the connection pool -// implementation doesn't provide a name, instrumentation should use a -// combination of `server.address` and `server.port` attributes formatted as -// `server.address:server.port`. -func PoolName(val string) attribute.KeyValue { - return PoolNameKey.String(val) +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). 
+func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) } // ASP.NET Core attributes @@ -215,13 +58,22 @@ const ( // Examples: 'Contoso.MyHandler' AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming + // to the "aspnetcore.diagnostics.exception.result" semantic conventions. + // It represents the aSP.NET Core exception middleware handling result + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the // "aspnetcore.rate_limiting.policy" semantic conventions. It represents // the rate limiting policy name. // // Type: string - // RequirementLevel: ConditionallyRequired (if the matched endpoint for the - // request had a rate-limiting policy.) + // RequirementLevel: Optional // Stability: stable // Examples: 'fixed', 'sliding', 'token' AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") @@ -231,8 +83,7 @@ const ( // the flag indicating if request was handled by the application pipeline. // // Type: boolean - // RequirementLevel: ConditionallyRequired (if and only if the request was - // not handled.) + // RequirementLevel: Optional // Stability: stable // Examples: True AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") @@ -242,11 +93,20 @@ const ( // value that indicates whether the matched route is a fallback route. // // Type: boolean - // RequirementLevel: ConditionallyRequired (If and only if a route was - // successfully matched.) + // RequirementLevel: Optional // Stability: stable // Examples: True AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") + + // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the + // "aspnetcore.routing.match_status" semantic conventions. 
It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") ) var ( @@ -260,6 +120,24 @@ var ( AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") ) +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + // AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming // to the "aspnetcore.diagnostics.handler.type" semantic conventions. It // represents the full type name of the @@ -290,695 +168,271 @@ func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { return AspnetcoreRoutingIsFallbackKey.Bool(val) } -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // signalR HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// Describes JVM buffer metric attributes. +// Generic attributes for AWS services. const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. 
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. // // Type: string - // RequirementLevel: Recommended + // RequirementLevel: Optional // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") ) -// JvmBufferPoolName returns an attribute KeyValue conforming to the -// "jvm.buffer.pool.name" semantic conventions. It represents the name of the -// buffer pool. -func JvmBufferPoolName(val string) attribute.KeyValue { - return JvmBufferPoolNameKey.String(val) +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) } -// Describes JVM memory metric attributes. +// Attributes for AWS DynamoDB. const ( - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'heap', 'non_heap' - JvmMemoryTypeKey = attribute.Key("jvm.memory.type") -) - -var ( - // Heap memory - JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") - // Non-heap memory - JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") -) - -// JvmMemoryPoolName returns an attribute KeyValue conforming to the -// "jvm.memory.pool.name" semantic conventions. It represents the name of the -// memory pool. -func JvmMemoryPoolName(val string) attribute.KeyValue { - return JvmMemoryPoolNameKey.String(val) -} - -// Attributes for process CPU metrics. -const ( - // ProcessCPUStateKey is the attribute Key conforming to the - // "process.cpu.state" semantic conventions. It represents the CPU state - // for this data point. 
A process SHOULD be characterized _either_ by data - // points with no `state` labels, _or only_ data points with `state` - // labels. + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. // - // Type: Enum + // Type: string[] // RequirementLevel: Optional // Stability: experimental - ProcessCPUStateKey = attribute.Key("process.cpu.state") -) - -var ( - // system - ProcessCPUStateSystem = ProcessCPUStateKey.String("system") - // user - ProcessCPUStateUser = ProcessCPUStateKey.String("user") - // wait - ProcessCPUStateWait = ProcessCPUStateKey.String("wait") -) + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") -// Describes System metric attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. It represents the device identifier + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. // - // Type: string + // Type: boolean // RequirementLevel: Optional // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") -// Describes System CPU metric attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. // - // Type: int + // Type: string[] // RequirementLevel: Optional // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the CPU state for - // this data point. A system's CPU SHOULD be characterized *either* by data - // points with no `state` labels, *or only* data points with `state` - // labels. + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. 
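
// Usage sketch (editor's illustration, not part of the generated file):
// annotating a DynamoDB client span with the request attributes defined
// above. Assumes the same `semconv` alias as the earlier sketch, plus
// "context" and "go.opentelemetry.io/otel/trace"; the attribute values are
// illustrative.
func annotateGetItem(ctx context.Context, requestID string) {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(
		semconv.AWSRequestID(requestID),
		semconv.AWSDynamoDBConsistentReadKey.Bool(true),
		semconv.AWSDynamoDBAttributesToGetKey.StringSlice([]string{"lives", "id"}),
	)
}
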
// - // Type: Enum + // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") -// Describes System Memory metric attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory - // state + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. // - // Type: Enum + // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") -// Describes System Memory Paging metric attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. // - // Type: Enum + // Type: string[] // RequirementLevel: Optional // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. 
It represents the memory - // paging state + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field // - // Type: Enum + // Type: string[] // RequirementLevel: Optional // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. // - // Type: Enum + // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") -// Describes Filesystem metric attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. 
// - // Type: string + // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. // - // Type: Enum + // Type: string[] // RequirementLevel: Optional // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. It represents the filesystem -// mode -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to -// the "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Describes Network metric attributes -const ( - // SystemNetworkStateKey is the attribute Key conforming to the - // "system.network.state" semantic conventions. 
It represents a stateless - // protocol MUST NOT set this attribute - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'close_wait' - SystemNetworkStateKey = attribute.Key("system.network.state") -) - -var ( - // close - SystemNetworkStateClose = SystemNetworkStateKey.String("close") - // close_wait - SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") - // closing - SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") - // delete - SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") - // established - SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") - // fin_wait_1 - SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") - // fin_wait_2 - SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") - // last_ack - SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") - // listen - SystemNetworkStateListen = SystemNetworkStateKey.String("listen") - // syn_recv - SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") - // syn_sent - SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") - // time_wait - SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") -) - -// Describes System Process metric attributes -const ( - // SystemProcessStatusKey is the attribute Key conforming to the - // "system.process.status" semantic conventions. It represents the process - // state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessStatusKey = attribute.Key("system.process.status") -) - -var ( - // running - SystemProcessStatusRunning = SystemProcessStatusKey.String("running") - // sleeping - SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") - // stopped - SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") - // defunct - SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") -) - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). 
-func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") -// Attributes for AWS DynamoDB. -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. // - // Type: string[] + // Type: double // RequirementLevel: Optional // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. // - // Type: string[] + // Type: double // RequirementLevel: Optional // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. 
// - // Type: string[] + // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. // // Type: int // RequirementLevel: Optional // Stability: experimental // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. // - // Type: string[] + // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. 
It represents the keys + // in the `RequestItems` object field. // // Type: string[] // RequirementLevel: Optional // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. 
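
// Usage sketch (editor's illustration, not part of the generated file):
// annotating a DynamoDB `Query` span with the attributes defined above.
// Same assumed imports as the earlier sketches; values are illustrative.
func annotateQuery(ctx context.Context, scannedCount int) {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(
		semconv.AWSDynamoDBIndexNameKey.String("name_to_group"),
		semconv.AWSDynamoDBSelectKey.String("ALL_ATTRIBUTES"),
		semconv.AWSDynamoDBScanForwardKey.Bool(true),
		semconv.AWSDynamoDBLimitKey.Int(10),
		semconv.AWSDynamoDBScannedCountKey.Int(scannedCount),
	)
}
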
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. 
It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int + // Type: int // RequirementLevel: Optional // Stability: experimental // Examples: 100 @@ -1146,128 +600,584 @@ func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { return AWSDynamoDBTotalSegmentsKey.Int(val) } -// The web browser attributes +// Attributes for AWS Elastic Container Service (ECS). const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. // - // Type: string[] + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. It represents the preferred - // language of the user using the browser + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. // - // Type: boolean + // Type: Enum // RequirementLevel: Optional // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. 
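
// Usage sketch (editor's illustration, not part of the generated file): a
// parallel `Scan` worker annotating its span with the segment attributes
// defined above (`AWSDynamoDBSegmentKey` and `AWSDynamoDBTotalSegments`).
// Same assumed imports as the earlier sketches.
func annotateParallelScan(ctx context.Context, segment, totalSegments int) {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(
		semconv.AWSDynamoDBSegmentKey.Int(segment),
		semconv.AWSDynamoDBTotalSegments(totalSegments),
	)
}
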
- BrowserMobileKey = attribute.Key("browser.mobile") + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) } -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. 
It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) } -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) } -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) } -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Attributes for AWS Elastic Kubernetes Service (EKS). const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. 
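
// Usage sketch (editor's illustration, not part of the generated file):
// assembling an OpenTelemetry resource from the ECS attributes defined above.
// Assumes the same `semconv` alias as the earlier sketches plus
// "go.opentelemetry.io/otel/sdk/resource"; the ARNs and other values are the
// illustrative examples from the comments above.
func ecsResource() *resource.Resource {
	return resource.NewSchemaless(
		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
		semconv.AWSECSTaskARN("arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b"),
		semconv.AWSECSTaskID("10838bed-421f-43ef-870a-f43feacbbb5b"),
		semconv.AWSECSTaskFamily("opentelemetry-family"),
		semconv.AWSECSTaskRevision("8"),
		semconv.AWSECSLaunchtypeFargate,
	)
}
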
+ // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. // // Type: string // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Attributes for AWS Logs. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). // - // Type: int + // Type: string[] // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Attributes for AWS Lambda. +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for AWS S3. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. 
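
// Usage sketch (editor's illustration, not part of the generated file): a
// Lambda handler recording the invoked ARN on its span and the log group /
// log stream names on the resource, using the helpers defined above. Same
// assumed imports as the earlier sketches (trace and sdk/resource); values
// are the illustrative examples from the comments above.
func annotateLambdaInvocation(ctx context.Context, invokedARN string) {
	trace.SpanFromContext(ctx).SetAttributes(semconv.AWSLambdaInvokedARN(invokedARN))
}

func lambdaLogResource() *resource.Resource {
	return resource.NewSchemaless(
		semconv.AWSLogGroupNames("/aws/lambda/my-function"),
		semconv.AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
	)
}
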
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
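
// Usage sketch (editor's illustration, not part of the generated file):
// annotating an S3 `upload-part` client span with the helpers defined above.
// Same assumed imports as the earlier sketches; values are illustrative.
func annotateUploadPart(ctx context.Context, partNumber int, uploadID string) {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3PartNumber(partNumber),
		semconv.AWSS3UploadID(uploadID),
	)
}
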
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through // an intermediary, `client.port` SHOULD represent the client port behind // any intermediaries, for example proxies, if it's available. ClientPortKey = attribute.Key("client.port") @@ -1810,7 +1720,7 @@ const ( // K8S defines a link to the container registry repository with digest // `"imageID": "registry.azurecr.io // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assinged by the container runtime and can vary in different + // The ID is assigned by the container runtime and can vary in different // environments. Consider using `oci.manifest.digest` if it is important to // identify the same image in different environments/runtimes. ContainerImageIDKey = attribute.Key("container.image.id") @@ -1963,367 +1873,108 @@ func ContainerRuntime(val string) attribute.KeyValue { return ContainerRuntimeKey.String(val) } -// The attributes used to describe telemetry in the context of databases. +// This group defines the attributes used to describe telemetry in the context +// of databases. const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. + // DBClientConnectionsPoolNameKey is the attribute Key conforming to the + // "db.client.connections.pool.name" semantic conventions. It represents + // the name of the connection pool; unique within the instrumented + // application. 
In case the connection pool implementation doesn't provide + // a name, instrumentation should use a combination of `server.address` and + // `server.port` attributes formatted as `server.address:server.port`. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + // Examples: 'myDataSource' + DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. + // DBClientConnectionsStateKey is the attribute Key conforming to the + // "db.client.connections.state" semantic conventions. It represents the + // state of a connection in the pool // - // Type: boolean + // Type: Enum // RequirementLevel: Optional // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + // Examples: 'idle' + DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. how many rows will be returned at once. + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. // - // Type: int + // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + // Examples: 'public.users', 'customers' + // Note: If the collection name is parsed from the query, it SHOULD match + // the value provided in the query and may be qualified with the schema and + // database name. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBCollectionNameKey = attribute.Key("db.collection.name") - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" + // semantic conventions. It represents the name of the database, fully + // qualified within the server address and port. 
// - // Type: int + // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + // Examples: 'customers', 'test.users' + // Note: If a database system has multiple namespace components, they + // SHOULD be concatenated (potentially using database system specific + // conventions) from most general to most specific namespace component, and + // more specific namespaces SHOULD NOT be captured without the more general + // namespaces, to ensure that "startswith" queries for the more general + // namespaces will be valid. + // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") - // DBCassandraTableKey is the attribute Key conforming to the - // "db.cassandra.table" semantic conventions. It represents the name of the - // primary Cassandra table that the operation is acting upon, including the - // keyspace name (if applicable). + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra - // rather than sql. It is not recommended to attempt any client-side - // parsing of `db.statement` just to get this property, but it should be - // set if it is provided by the library being instrumented. If the - // operation is acting upon an anonymous table, or more than one table, - // this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. It represents the cosmos - // DB container name. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") - - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. It represents the - // represents the identifier of an Elasticsearch cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") - - // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" - // semantic conventions. It represents an identifier (address, unique name, - // or any other identifier) of the database instance that is executing - // queries or mutations on the current connection. This is useful in cases - // where the database is running in a clustered environment and the - // instrumentation is able to record the node executing the query. The - // client may obtain this value in databases like MySQL using queries like - // `select @@hostname`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mysql-e26b99z.example.com' - DBInstanceIDKey = attribute.Key("db.instance.id") - - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the MongoDB - // collection being accessed within the database stated in `db.name`. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") - - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named - // instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") - - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the this attribute is used to report the name - // of the database being accessed. For commands that switch the database, - // this should be set to the target database (even if the command fails). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") - - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") - - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") - - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") - - // DBStatementKey is the attribute Key conforming to the "db.statement" - // semantic conventions. It represents the database statement being - // executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - DBStatementKey = attribute.Key("db.statement") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. // // Type: Enum // RequirementLevel: Optional // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. DBSystemKey = attribute.Key("db.system") - - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. It represents the username for accessing the database. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") ) var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") ) var ( @@ -2433,73 +2084,294 @@ var ( DBSystemTrino = DBSystemKey.String("trino") ) -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. 
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) +// DBClientConnectionsPoolName returns an attribute KeyValue conforming to +// the "db.client.connections.pool.name" semantic conventions. It represents +// the name of the connection pool; unique within the instrumented application. +// In case the connection pool implementation doesn't provide a name, +// instrumentation should use a combination of `server.address` and +// `server.port` attributes formatted as `server.address:server.port`. +func DBClientConnectionsPoolName(val string) attribute.KeyValue { + return DBClientConnectionsPoolNameKey.String(val) } -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) } -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) +// DBNamespace returns an attribute KeyValue conforming to the +// "db.namespace" semantic conventions. It represents the name of the database, +// fully qualified within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) } -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) } -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. -func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) +// DBQueryText returns an attribute KeyValue conforming to the +// "db.query.text" semantic conventions. It represents the database query being +// executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) } -// DBCassandraTable returns an attribute KeyValue conforming to the -// "db.cassandra.table" semantic conventions. It represents the name of the -// primary Cassandra table that the operation is acting upon, including the -// keyspace name (if applicable). -func DBCassandraTable(val string) attribute.KeyValue { - return DBCassandraTableKey.String(val) -} +// This group defines attributes for Cassandra. 
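For illustration only (this note and the sketch are not part of the vendored file): the hunk above swaps the old `db.name` / `db.operation` / `db.statement` style constructors for `DBNamespace`, `DBCollectionName`, `DBOperationName`, and `DBQueryText`. A minimal sketch of how calling code might attach these renamed attributes to a database client span, assuming the package is vendored under a `go.opentelemetry.io/otel/semconv/v1.26.0` import path (the exact version segment depends on the release being vendored); the tracer name and literal values are invented for the example:

```go
// Illustrative sketch only; not part of the vendored diff. It attaches the
// renamed db.* attributes introduced above to a database client span.
// The semconv import path version, tracer name, and values are assumptions.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version segment
)

// annotateQuerySpan records db.namespace, db.collection.name,
// db.operation.name, and db.query.text on a span for one query.
func annotateQuerySpan(ctx context.Context) {
	_, span := otel.Tracer("db-example").Start(ctx, "SELECT public.users")
	defer span.End()

	span.SetAttributes(
		// A generated enum member (e.g. DBSystemPostgreSQL) could be used
		// here instead of building the value from DBSystemKey directly.
		semconv.DBSystemKey.String("postgresql"),
		semconv.DBNamespace("customers"),
		semconv.DBCollectionName("public.users"),
		semconv.DBOperationName("SELECT"),
		semconv.DBQueryText("SELECT * FROM public.users WHERE username = ?"),
	)
}
```

The Cassandra-specific group that the diff continues with below layers its `db.cassandra.*` attributes on top of these general ones.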
+const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") -// DBCosmosDBContainer returns an attribute KeyValue conforming to the -// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB -// container name. -func DBCosmosDBContainer(val string) attribute.KeyValue { - return DBCosmosDBContainerKey.String(val) -} + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// This group defines attributes for Azure Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It // represents the request payload size in bytes func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { return DBCosmosDBRequestContentLengthKey.Int(val) @@ -2519,6 +2391,30 @@ func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { return DBCosmosDBSubStatusCodeKey.Int(val) } +// This group defines attributes for Elasticsearch. +const ( + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. It represents the + // represents the identifier of an Elasticsearch cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + + // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. 
It represents the + // represents the human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") +) + // DBElasticsearchClusterName returns an attribute KeyValue conforming to // the "db.elasticsearch.cluster.name" semantic conventions. It represents the // represents the identifier of an Elasticsearch cluster. @@ -2526,78 +2422,12 @@ func DBElasticsearchClusterName(val string) attribute.KeyValue { return DBElasticsearchClusterNameKey.String(val) } -// DBInstanceID returns an attribute KeyValue conforming to the -// "db.instance.id" semantic conventions. It represents an identifier (address, -// unique name, or any other identifier) of the database instance that is -// executing queries or mutations on the current connection. This is useful in -// cases where the database is running in a clustered environment and the -// instrumentation is able to record the node executing the query. The client -// may obtain this value in databases like MySQL using queries like `select -// @@hostname`. -func DBInstanceID(val string) attribute.KeyValue { - return DBInstanceIDKey.String(val) -} - -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the MongoDB -// collection being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) -} - -// DBMSSQLInstanceName returns an attribute KeyValue conforming to the -// "db.mssql.instance_name" semantic conventions. It represents the Microsoft -// SQL Server [instance -// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) -// connecting to. This name is used to determine the port of a named instance. -func DBMSSQLInstanceName(val string) attribute.KeyValue { - return DBMSSQLInstanceNameKey.String(val) -} - -// DBName returns an attribute KeyValue conforming to the "db.name" semantic -// conventions. It represents the this attribute is used to report the name of -// the database being accessed. For commands that switch the database, this -// should be set to the target database (even if the command fails). -func DBName(val string) attribute.KeyValue { - return DBNameKey.String(val) -} - -// DBOperation returns an attribute KeyValue conforming to the -// "db.operation" semantic conventions. It represents the name of the operation -// being executed, e.g. the [MongoDB command -// name](https://docs.mongodb.com/manual/reference/command/#database-operations) -// such as `findAndModify`, or the SQL keyword. -func DBOperation(val string) attribute.KeyValue { - return DBOperationKey.String(val) -} - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. 
It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// DBStatement returns an attribute KeyValue conforming to the -// "db.statement" semantic conventions. It represents the database statement -// being executed. -func DBStatement(val string) attribute.KeyValue { - return DBStatementKey.String(val) -} - -// DBUser returns an attribute KeyValue conforming to the "db.user" semantic -// conventions. It represents the username for accessing the database. -func DBUser(val string) attribute.KeyValue { - return DBUserKey.String(val) +// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// represents the human-readable identifier of the node/instance to which a +// request was routed. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) } // Attributes for software deployments. @@ -2633,497 +2463,30 @@ func DeploymentEnvironment(val string) attribute.KeyValue { return DeploymentEnvironmentKey.String(val) } -// "Describes deprecated db attributes." +// Attributes that represents an occurrence of a lifecycle transition on the +// Android platform. const ( - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // deprecated, use `server.address`, `server.port` attributes instead. + // AndroidStateKey is the attribute Key conforming to the "android.state" + // semantic conventions. It represents the deprecated use the + // `device.app.lifecycle` event definition including `android.state` as a + // payload field instead. // - // Type: string + // Type: Enum // RequirementLevel: Optional // Stability: experimental - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. It represents the - // deprecated, use `db.instance.id` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") - - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // removed, no replacement at this time. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") -) - -// DBConnectionString returns an attribute KeyValue conforming to the -// "db.connection_string" semantic conventions. It represents the deprecated, -// use `server.address`, `server.port` attributes instead. -func DBConnectionString(val string) attribute.KeyValue { - return DBConnectionStringKey.String(val) -} - -// DBElasticsearchNodeName returns an attribute KeyValue conforming to the -// "db.elasticsearch.node.name" semantic conventions. It represents the -// deprecated, use `db.instance.id` instead. 
-func DBElasticsearchNodeName(val string) attribute.KeyValue { - return DBElasticsearchNodeNameKey.String(val) -} - -// DBJDBCDriverClassname returns an attribute KeyValue conforming to the -// "db.jdbc.driver_classname" semantic conventions. It represents the removed, -// no replacement at this time. -func DBJDBCDriverClassname(val string) attribute.KeyValue { - return DBJDBCDriverClassnameKey.String(val) -} - -// Describes deprecated HTTP attributes. -const ( - // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" - // semantic conventions. It represents the deprecated, use - // `network.protocol.name` instead. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - HTTPFlavorKey = attribute.Key("http.flavor") - - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. It represents the deprecated, use - // `http.request.method` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. It represents the - // deprecated, use `http.request.header.content-length` instead. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. It represents the - // deprecated, use `http.response.header.content-length` instead. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. It represents the deprecated, use `url.scheme` - // instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. It represents the deprecated, - // use `http.response.status_code` instead. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") - - // HTTPTargetKey is the attribute Key conforming to the "http.target" - // semantic conventions. It represents the deprecated, use `url.path` and - // `url.query` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/search?q=OpenTelemetry#SemConv' - HTTPTargetKey = attribute.Key("http.target") - - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. It represents the deprecated, use `url.full` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - HTTPURLKey = attribute.Key("http.url") - - // HTTPUserAgentKey is the attribute Key conforming to the - // "http.user_agent" semantic conventions. It represents the deprecated, - // use `user_agent.original` instead. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - HTTPUserAgentKey = attribute.Key("http.user_agent") -) - -var ( - // HTTP/1.0 - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP/1.1 - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP/2 - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // HTTP/3 - HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") - // SPDY protocol - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. It represents the deprecated, use -// `http.request.method` instead. -func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. It represents the -// deprecated, use `http.request.header.content-length` instead. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. It represents the -// deprecated, use `http.response.header.content-length` instead. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. It represents the deprecated, use `url.scheme` -// instead. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. It represents the deprecated, use -// `http.response.status_code` instead. -func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTPTarget returns an attribute KeyValue conforming to the "http.target" -// semantic conventions. It represents the deprecated, use `url.path` and -// `url.query` instead. -func HTTPTarget(val string) attribute.KeyValue { - return HTTPTargetKey.String(val) -} - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. It represents the deprecated, use `url.full` instead. -func HTTPURL(val string) attribute.KeyValue { - return HTTPURLKey.String(val) -} - -// HTTPUserAgent returns an attribute KeyValue conforming to the -// "http.user_agent" semantic conventions. It represents the deprecated, use -// `user_agent.original` instead. -func HTTPUserAgent(val string) attribute.KeyValue { - return HTTPUserAgentKey.String(val) -} - -// Describes deprecated messaging attributes. -const ( - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the "Deprecated, use `messaging.destination.partition.id` - // instead." 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") -) - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the "Deprecated, use -// `messaging.destination.partition.id` instead." -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. It represents the deprecated, use - // `server.address`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'example.com' - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. It represents the deprecated, use `server.port`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 8080 - NetHostPortKey = attribute.Key("net.host.port") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. It represents the deprecated, use `server.address` - // on client spans and `client.address` on server spans. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'example.com' - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. It represents the deprecated, use `server.port` on - // client spans and `client.port` on server spans. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 8080 - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetProtocolNameKey is the attribute Key conforming to the - // "net.protocol.name" semantic conventions. It represents the deprecated, - // use `network.protocol.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'amqp', 'http', 'mqtt' - NetProtocolNameKey = attribute.Key("net.protocol.name") - - // NetProtocolVersionKey is the attribute Key conforming to the - // "net.protocol.version" semantic conventions. It represents the - // deprecated, use `network.protocol.version`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3.1.1' - NetProtocolVersionKey = attribute.Key("net.protocol.version") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. It represents the deprecated, - // use `network.transport` and `network.type`. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - NetSockFamilyKey = attribute.Key("net.sock.family") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. It represents the deprecated, - // use `network.local.address`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/my.sock' - NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. 
It represents the deprecated, - // use `network.local.port`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 8080 - NetSockHostPortKey = attribute.Key("net.sock.host.port") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. It represents the deprecated, - // use `network.peer.address`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.0.1' - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. It represents the deprecated, - // no replacement at this time. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/my.sock' - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. It represents the deprecated, - // use `network.peer.port`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 65531 - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. It represents the deprecated, use - // `network.transport`. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - NetTransportKey = attribute.Key("net.transport") -) - -var ( - // IPv4 address - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. It represents the deprecated, use -// `server.address`. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. It represents the deprecated, use -// `server.port`. -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. It represents the deprecated, use -// `server.address` on client spans and `client.address` on server spans. -func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. It represents the deprecated, use -// `server.port` on client spans and `client.port` on server spans. -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetProtocolName returns an attribute KeyValue conforming to the -// "net.protocol.name" semantic conventions. It represents the deprecated, use -// `network.protocol.name`. 
-func NetProtocolName(val string) attribute.KeyValue { - return NetProtocolNameKey.String(val) -} - -// NetProtocolVersion returns an attribute KeyValue conforming to the -// "net.protocol.version" semantic conventions. It represents the deprecated, -// use `network.protocol.version`. -func NetProtocolVersion(val string) attribute.KeyValue { - return NetProtocolVersionKey.String(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. It represents the deprecated, use -// `network.local.address`. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. It represents the deprecated, use -// `network.local.port`. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. It represents the deprecated, use -// `network.peer.address`. -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. It represents the deprecated, no -// replacement at this time. -func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. It represents the deprecated, use -// `network.peer.port`. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// Deprecated system attributes. -const ( - // SystemProcessesStatusKey is the attribute Key conforming to the - // "system.processes.status" semantic conventions. It represents the - // deprecated, use `system.process.status` instead. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessesStatusKey = attribute.Key("system.processes.status") -) + // Note: The Android lifecycle states are defined in [Activity lifecycle + // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), + // and from which the `OS identifiers` are derived. 
+ AndroidStateKey = attribute.Key("android.state") +) var ( - // running - SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") - // sleeping - SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") - // stopped - SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") - // defunct - SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") ) // These attributes may be used to describe the receiver of a network @@ -3393,8 +2756,13 @@ const ( // Stability: stable // Examples: 'timeout', 'java.net.UnknownHostException', // 'server_certificate_invalid', '500' - // Note: The `error.type` SHOULD be predictable and SHOULD have low + // Note: The `error.type` SHOULD be predictable, and SHOULD have low // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used. + // // Instrumentations SHOULD document the list of errors they report. // // The cardinality of `error.type` within one instrumentation library @@ -3423,6 +2791,31 @@ var ( ErrorTypeOther = ErrorTypeKey.String("_OTHER") ) +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes. + EventNameKey = attribute.Key("event.name") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the identifies the class / type of +// event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + // The shared attributes used to report a single exception associated with a // span or log. const ( @@ -3451,7 +2844,7 @@ const ( // will escape, if one checks for an active exception just before ending // the span, // as done in the [example for recording span - // exceptions](#recording-an-exception). + // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). 
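Editorial note: the `android.state` and `event.name` attributes introduced above are ordinary attribute helpers. A minimal usage sketch, assuming this file belongs to a versioned semconv package (the import path below is an assumption, not taken from this diff); they attach to a log record or span like any other `attribute.KeyValue`:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/attribute"
        // Assumed versioned import path; substitute the semconv version this file is generated for.
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func main() {
        // Attributes for a mobile app lifecycle event, using the helpers added above.
        attrs := []attribute.KeyValue{
            semconv.EventName("device.app.lifecycle"), // example value from the attribute docs above
            semconv.AndroidStateForeground,
        }
        fmt.Println(attrs)
    }
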
// // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, @@ -4126,36 +3519,367 @@ func GCPGceInstanceName(val string) attribute.KeyValue { return GCPGceInstanceNameKey.String(val) } -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. // - // Type: Enum + // Type: string // RequirementLevel: Optional // Stability: experimental - HostArchKey = attribute.Key("host.arch") + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount - // of level 2 memory cache available to the processor (in Bytes). + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. // - // Type: int + // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") - // HostCPUFamilyKey is the attribute Key conforming to the - // "host.cpu.family" semantic conventions. It represents the family or - // generation of the CPU. + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. // - // Type: string + // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: '6', 'PA-RISC 1.1e' + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. 
It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. +func GenAiResponseID(val string) attribute.KeyValue { + return GenAiResponseIDKey.String(val) +} + +// GenAiResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// LLM a response was generated from. +func GenAiResponseModel(val string) attribute.KeyValue { + return GenAiResponseModelKey.String(val) +} + +// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to +// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the +// number of tokens used in the LLM response (completion). +func GenAiUsageCompletionTokens(val int) attribute.KeyValue { + return GenAiUsageCompletionTokensKey.Int(val) +} + +// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number +// of tokens used in the LLM prompt. +func GenAiUsagePromptTokens(val int) attribute.KeyValue { + return GenAiUsagePromptTokensKey.Int(val) +} + +// Attributes for GraphQL. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. 
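Editorial note: taken together, the `gen_ai.*` attributes above describe a single model call, with request-side settings known when the span starts and response-side results recorded once the completion returns. A rough sketch of how an instrumentation might use the helpers defined above (the semconv import path is an assumption, tracing falls back to the global no-op provider, and the literal values are the example values from the attribute docs):

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/trace"
        // Assumed versioned import path for the package in this diff.
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func chatCompletion(ctx context.Context) {
        tracer := otel.Tracer("llm-client") // no-op unless a TracerProvider is registered
        ctx, span := tracer.Start(ctx, "chat gpt-4", trace.WithAttributes(
            semconv.GenAiSystemOpenai,
            semconv.GenAiRequestModel("gpt-4"),
            semconv.GenAiRequestMaxTokens(100),
            semconv.GenAiRequestTemperature(0.2),
        ))
        defer span.End()

        _ = ctx // in real code, pass ctx on to the model client call here

        // Once the response arrives, record the response-side attributes.
        span.SetAttributes(
            semconv.GenAiResponseModel("gpt-4-0613"),
            semconv.GenAiResponseFinishReasons("stop"),
            semconv.GenAiUsagePromptTokens(100),
            semconv.GenAiUsageCompletionTokens(180),
        )
    }

    func main() {
        chatCompletion(context.Background())
    }
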
It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// Attributes for the Android platform on which the Android application is +// running. +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. 
It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' HostCPUFamilyKey = attribute.Key("host.cpu.family") // HostCPUModelIDKey is the attribute Key conforming to the @@ -4667,6 +4391,141 @@ func HTTPRoute(val string) attribute.KeyValue { return HTTPRouteKey.String(val) } +// Java Virtual machine related attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") + + // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" + // semantic conventions. It represents the name of the garbage collector + // action. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). + JvmGcActionKey = attribute.Key("jvm.gc.action") + + // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" + // semantic conventions. It represents the name of the garbage collector. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). + JvmGcNameKey = attribute.Key("jvm.gc.name") + + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. 
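Editorial note: the GraphQL attributes above map one-to-one onto a single executed operation. A small sketch of building the attribute set for a query with the helpers defined above, again assuming a versioned semconv import path:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/attribute"
        // Assumed versioned import path for the package in this diff.
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func main() {
        // Attributes for one executed GraphQL operation, using the example
        // values from the attribute docs above.
        attrs := []attribute.KeyValue{
            semconv.GraphqlOperationTypeQuery,
            semconv.GraphqlOperationName("findBookByID"),
            semconv.GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
        }
        fmt.Println(attrs)
    }
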
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") + + // JvmThreadDaemonKey is the attribute Key conforming to the + // "jvm.thread.daemon" semantic conventions. It represents the whether the + // thread is daemon or not. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon") + + // JvmThreadStateKey is the attribute Key conforming to the + // "jvm.thread.state" semantic conventions. It represents the state of the + // thread. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'runnable', 'blocked' + JvmThreadStateKey = attribute.Key("jvm.thread.state") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +var ( + // A thread that has not yet started is in this state + JvmThreadStateNew = JvmThreadStateKey.String("new") + // A thread executing in the Java virtual machine is in this state + JvmThreadStateRunnable = JvmThreadStateKey.String("runnable") + // A thread that is blocked waiting for a monitor lock is in this state + JvmThreadStateBlocked = JvmThreadStateKey.String("blocked") + // A thread that is waiting indefinitely for another thread to perform a particular action is in this state + JvmThreadStateWaiting = JvmThreadStateKey.String("waiting") + // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state + JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting") + // A thread that has exited is in this state + JvmThreadStateTerminated = JvmThreadStateKey.String("terminated") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// JvmGcAction returns an attribute KeyValue conforming to the +// "jvm.gc.action" semantic conventions. It represents the name of the garbage +// collector action. +func JvmGcAction(val string) attribute.KeyValue { + return JvmGcActionKey.String(val) +} + +// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name" +// semantic conventions. It represents the name of the garbage collector. +func JvmGcName(val string) attribute.KeyValue { + return JvmGcNameKey.String(val) +} + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// JvmThreadDaemon returns an attribute KeyValue conforming to the +// "jvm.thread.daemon" semantic conventions. 
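Editorial note: the JVM attributes above are primarily meant to be attached to JVM runtime metric data points (memory pools, garbage collection, threads). A sketch of the attribute sets a metrics exporter might build with the helpers defined above, with the usual caveat that the semconv import path is assumed:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/attribute"
        // Assumed versioned import path for the package in this diff.
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func main() {
        // Attributes for a heap-memory data point.
        memAttrs := attribute.NewSet(
            semconv.JvmMemoryTypeHeap,
            semconv.JvmMemoryPoolName("G1 Eden space"),
        )
        // Attributes for a GC-duration data point.
        gcAttrs := attribute.NewSet(
            semconv.JvmGcName("G1 Young Generation"),
            semconv.JvmGcAction("end of minor GC"),
        )
        fmt.Println(memAttrs.Len(), gcAttrs.Len())
    }
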
It represents the whether the +// thread is daemon or not. +func JvmThreadDaemon(val bool) attribute.KeyValue { + return JvmThreadDaemonKey.Bool(val) +} + // Kubernetes resource attributes. const ( // K8SClusterNameKey is the attribute Key conforming to the @@ -4733,9 +4592,18 @@ const ( // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key + // conforming to the "k8s.container.status.last_terminated_reason" semantic + // conventions. It represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + // K8SCronJobNameKey is the attribute Key conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. @@ -4931,6 +4799,13 @@ func K8SContainerRestartCount(val int) attribute.KeyValue { return K8SContainerRestartCountKey.Int(val) } +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + // K8SCronJobName returns an attribute KeyValue conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. @@ -5044,13 +4919,127 @@ func K8SStatefulSetUID(val string) attribute.KeyValue { return K8SStatefulSetUIDKey.String(val) } -// Attributes describing telemetry around messaging systems and messaging -// activities. +// Log attributes const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Attributes for a file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. 
It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// The generic attributes that may be used in any Log Record. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. // // Type: int // RequirementLevel: Optional @@ -5064,14 +5053,14 @@ const ( MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client_id" semantic conventions. It represents a unique + // "messaging.client.id" semantic conventions. It represents a unique // identifier for the client that consumes or produces a message. 
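Editorial note: the log attributes above describe the file a log record was read from and give the record a stable identity via `log.record.uid`. A brief sketch using the helpers defined above (semconv import path assumed; the ULID is the example value from the docs):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/attribute"
        // Assumed versioned import path for the package in this diff.
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func main() {
        // Attributes a file-tailing collector might attach to a log record.
        attrs := []attribute.KeyValue{
            semconv.LogFileName("audit.log"),
            semconv.LogFilePath("/var/log/mysql/audit.log"),
            semconv.LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV"),
        }
        fmt.Println(attrs)
    }
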
// // Type: string // RequirementLevel: Optional // Stability: experimental // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client_id") + MessagingClientIDKey = attribute.Key("messaging.client.id") // MessagingDestinationAnonymousKey is the attribute Key conforming to the // "messaging.destination.anonymous" semantic conventions. It represents a @@ -5158,85 +5147,6 @@ const ( // SHOULD uniquely identify the broker. MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") - // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to - // the "messaging.eventhubs.consumer.group" semantic conventions. It - // represents the name of the consumer group the event consumer is - // associated with. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'indexer' - MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") - - // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming - // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. - // It represents the UTC epoch seconds at which the message has been - // accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") - - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") - // MessagingMessageBodySizeKey is the attribute Key conforming to the // "messaging.message.body.size" semantic conventions. It represents the // size of the message body in bytes. @@ -5285,226 +5195,52 @@ const ( // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message.id") - // MessagingOperationKey is the attribute Key conforming to the - // "messaging.operation" semantic conventions. It represents a string - // identifying the kind of messaging operation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationKey = attribute.Key("messaging.operation") - - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming - // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. - // It represents the rabbitMQ message delivery tag - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 123 - MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") + // Examples: 'ack', 'nack', 'send' + MessagingOperationNameKey = attribute.Key("messaging.operation.name") - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. // - // Type: string + // Type: Enum // RequirementLevel: Optional // Stability: experimental - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. 
+ // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents the messaging + // system as identified by the client instrumentation. // // Type: Enum // RequirementLevel: Optional // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. 
It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key - // conforming to the "messaging.servicebus.destination.subscription_name" - // semantic conventions. It represents the name of the subscription in the - // topic messages are received from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mySubscription' - MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") - - // MessagingServicebusDispositionStatusKey is the attribute Key conforming - // to the "messaging.servicebus.disposition_status" semantic conventions. - // It represents the describes the [settlement - // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") - - // MessagingServicebusMessageDeliveryCountKey is the attribute Key - // conforming to the "messaging.servicebus.message.delivery_count" semantic - // conventions. It represents the number of deliveries that have been - // attempted for this message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") - - // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key - // conforming to the "messaging.servicebus.message.enqueued_time" semantic - // conventions. It represents the UTC epoch seconds at which the message - // has been accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents an identifier for - // the messaging system being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationPublish = MessagingOperationKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationCreate = MessagingOperationKey.String("create") - // One or more messages are requested by a consumer. 
This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationReceive = MessagingOperationKey.String("receive") - // One or more messages are delivered to or processed by a consumer - MessagingOperationDeliver = MessagingOperationKey.String("process") - // One or more messages are settled - MessagingOperationSettle = MessagingOperationKey.String("settle") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Message is completed - MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") - // Message is abandoned - MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") - // Message is sent to dead letter queue - MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") - // Message is deferred - MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") -) +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) var ( // Apache ActiveMQ @@ -5538,7 +5274,7 @@ func MessagingBatchMessageCount(val int) attribute.KeyValue { } // MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client_id" semantic conventions. It represents a unique +// "messaging.client.id" semantic conventions. It represents a unique // identifier for the client that consumes or produces a message. 
func MessagingClientID(val string) attribute.KeyValue { return MessagingClientIDKey.String(val) @@ -5597,30 +5333,92 @@ func MessagingDestinationPublishName(val string) attribute.KeyValue { return MessagingDestinationPublishNameKey.String(val) } -// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming -// to the "messaging.eventhubs.consumer.group" semantic conventions. It -// represents the name of the consumer group the event consumer is associated -// with. -func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { - return MessagingEventhubsConsumerGroupKey.String(val) +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) } -// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.eventhubs.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) } -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key. -func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. +func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) } +// This group describes attributes specific to Apache Kafka. +const ( + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. 
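Editorial note: two messaging changes above matter for instrumentation authors. The client identifier key moves from `messaging.client_id` to `messaging.client.id` (the Go helper keeps its name; only the emitted key changes), and the old `messaging.operation` enum is split into `messaging.operation.type` (the enum) plus `messaging.operation.name` (the system-specific operation name). A migration sketch using the helpers defined above, with the semconv import path assumed as before:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/attribute"
        // Assumed versioned import path for the package in this diff.
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    // publishAttributes builds the attributes a producer instrumentation
    // might attach when publishing a message.
    func publishAttributes(clientID string) []attribute.KeyValue {
        return []attribute.KeyValue{
            semconv.MessagingClientID(clientID),   // now emits messaging.client.id
            semconv.MessagingOperationTypePublish, // replaces the old messaging.operation enum value
            semconv.MessagingOperationName("send"), // new: system-specific operation name
        }
    }

    func main() {
        fmt.Println(publishAttributes("client-5"))
    }
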
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka that are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + // MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to // the "messaging.kafka.consumer.group" semantic conventions. It represents the // name of the Kafka Consumer Group that is handling the message. Only applies @@ -5653,35 +5451,28 @@ func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { return MessagingKafkaMessageTombstoneKey.Bool(val) } -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} +// This group describes attributes specific to RabbitMQ. +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the RabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions.
It represents -// the size of the message body and metadata in bytes. -func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} + // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming + // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. + // It represents the RabbitMQ message delivery tag. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 123 + MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") +) // MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue // conforming to the "messaging.rabbitmq.destination.routing_key" semantic @@ -5697,6 +5488,122 @@ func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { return MessagingRabbitmqMessageDeliveryTagKey.Int(val) } +// This group describes attributes specific to RocketMQ. +const ( + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for a delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to the consumer. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the message group, which is essential for FIFO messages. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + // MessagingRocketmqClientGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the @@ -5752,105 +5659,279 @@ func MessagingRocketmqNamespace(val string) attribute.KeyValue { return MessagingRocketmqNamespaceKey.String(val) } -// MessagingServicebusDestinationSubscriptionName returns an attribute -// KeyValue conforming to the -// "messaging.servicebus.destination.subscription_name" semantic conventions. -// It represents the name of the subscription in the topic messages are -// received from. -func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { - return MessagingServicebusDestinationSubscriptionNameKey.String(val) -} - -// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.delivery_count" semantic -// conventions. 
It represents the number of deliveries that have been attempted -// for this message. -func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { - return MessagingServicebusMessageDeliveryCountKey.Int(val) -} - -// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingServicebusMessageEnqueuedTimeKey.Int(val) -} - -// These attributes may be used for any network related operation. +// This group describes attributes specific to GCP Pub/Sub. const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. + // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. + // It represents the ack deadline in seconds set for the modify ack + // deadline request. // - // Type: string + // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + // Examples: 10 + MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. + // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It + // represents the ack id for a given message. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + // Examples: 'ack_id' + MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. + // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key + // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" + // semantic conventions. It represents the delivery attempt for a given + // message. // - // Type: string + // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") + // Examples: 2 + MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. 
// // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") +) - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. +// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic +// conventions. It represents the ack deadline in seconds set for the modify +// ack deadline request. +func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It +// represents the ack id for a given message. +func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageAckIDKey.String(val) +} + +// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. +func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageOrderingKeyKey.String(val) +} + +// This group describes attributes specific to Azure Service Bus. +const ( + // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key + // conforming to the "messaging.servicebus.destination.subscription_name" + // semantic conventions. It represents the name of the subscription in the + // topic messages are received from. // - // Type: Enum + // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + // Examples: 'mySubscription' + MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. + // MessagingServicebusDispositionStatusKey is the attribute Key conforming + // to the "messaging.servicebus.disposition_status" semantic conventions. + // It represents the describes the [settlement + // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). 
// // Type: Enum // RequirementLevel: Optional // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") + MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. + // MessagingServicebusMessageDeliveryCountKey is the attribute Key + // conforming to the "messaging.servicebus.message.delivery_count" semantic + // conventions. It represents the number of deliveries that have been + // attempted for this message. // - // Type: Enum + // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") + // Examples: 2 + MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") + + // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key + // conforming to the "messaging.servicebus.message.enqueued_time" semantic + // conventions. It represents the UTC epoch seconds at which the message + // has been accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") +) + +var ( + // Message is completed + MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") + // Message is abandoned + MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") + // Message is deferred + MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") +) + +// MessagingServicebusDestinationSubscriptionName returns an attribute +// KeyValue conforming to the +// "messaging.servicebus.destination.subscription_name" semantic conventions. +// It represents the name of the subscription in the topic messages are +// received from. +func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingServicebusDestinationSubscriptionNameKey.String(val) +} + +// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message. +func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + +// This group describes attributes specific to Azure Event Hubs. +const ( + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. 
It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") +) + +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '001' + NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierNameKey is the attribute Key conforming to the + // "network.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'sprint' + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkConnectionSubtypeKey is the attribute Key conforming to the + // "network.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'LTE' + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the + // internet connection type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'wifi' + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkIoDirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network + // IO operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'transmit' + NetworkIoDirectionKey = attribute.Key("network.io.direction") // NetworkLocalAddressKey is the attribute Key conforming to the // "network.local.address" semantic conventions. It represents the local @@ -6133,6 +6214,26 @@ func OciManifestDigest(val string) attribute.KeyValue { return OciManifestDigestKey.String(val) } +// Attributes used by the OpenTracing Shim layer. +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + // The operating system (OS) on which the process represented by this resource // is running. const ( @@ -6241,6 +6342,105 @@ func OSVersion(val string) attribute.KeyValue { return OSVersionKey.String(val) } +// Attributes reserved for OpenTelemetry +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. 
+func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + // An operating system process. const ( // ProcessCommandKey is the attribute Key conforming to the @@ -6282,6 +6482,26 @@ const ( // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It represents the + // specifies whether the context switches for this data point were + // voluntary or involuntary. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and + // time the process was created, in ISO 8601 format. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:25:34.853Z' + ProcessCreationTimeKey = attribute.Key("process.creation.time") + // ProcessExecutableNameKey is the attribute Key conforming to the // "process.executable.name" semantic conventions. It represents the name // of the process executable. On Linux based systems, can be set to the @@ -6306,6 +6526,46 @@ const ( // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") + // ProcessExitCodeKey is the attribute Key conforming to the + // "process.exit.code" semantic conventions. It represents the exit code of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the + // "process.exit.time" semantic conventions. It represents the date and + // time the process exited, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:26:12.315Z' + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID + // of the process's group leader. This is also the process group ID (PGID) + // of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. It represents the whether + // the process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + ProcessInteractiveKey = attribute.Key("process.interactive") + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" // semantic conventions. It represents the username of the user that owns // the process. @@ -6316,6 +6576,16 @@ const ( // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. It represents the type + // of page fault for this data point. Type `major` is for major/hard page + // faults, and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + // ProcessParentPIDKey is the attribute Key conforming to the // "process.parent_pid" semantic conventions. It represents the parent // Process identifier (PPID). @@ -6335,12 +6605,32 @@ const ( // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user + // ID (RUID) of the process. 
// - // Type: string + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the + // username of the real user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string // RequirementLevel: Optional // Stability: experimental // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' @@ -6367,6 +6657,83 @@ const ( // Stability: experimental // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved + // user ID (SUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the + // username of the saved user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID + // of the process's session leader. This is also the session ID (SID) of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessUserIDKey is the attribute Key conforming to the + // "process.user.id" semantic conventions. It represents the effective user + // ID (EUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the + // "process.user.name" semantic conventions. It represents the username of + // the effective user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" + // semantic conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily + // unique across all processes on the host but it is unique within the + // process namespace that the process exists within. 
+ ProcessVpidKey = attribute.Key("process.vpid") +) + +var ( + // voluntary + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +var ( + // major + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") ) // ProcessCommand returns an attribute KeyValue conforming to the @@ -6399,6 +6766,13 @@ func ProcessCommandLine(val string) attribute.KeyValue { return ProcessCommandLineKey.String(val) } +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and +// time the process was created, in ISO 8601 format. +func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + // ProcessExecutableName returns an attribute KeyValue conforming to the // "process.executable.name" semantic conventions. It represents the name of // the process executable. On Linux based systems, can be set to the `Name` in @@ -6417,6 +6791,35 @@ func ProcessExecutablePath(val string) attribute.KeyValue { return ProcessExecutablePathKey.String(val) } +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time +// the process exited, in ISO 8601 format. +func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of +// the process's group leader. This is also the process group ID (PGID) of the +// process. +func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents the whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + // ProcessOwner returns an attribute KeyValue conforming to the // "process.owner" semantic conventions. It represents the username of the user // that owns the process. @@ -6437,6 +6840,20 @@ func ProcessPID(val int) attribute.KeyValue { return ProcessPIDKey.Int(val) } +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process. +func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + // ProcessRuntimeDescription returns an attribute KeyValue conforming to the // "process.runtime.description" semantic conventions. 
It represents an // additional description about the runtime of the process, for example a @@ -6461,6 +6878,70 @@ func ProcessRuntimeVersion(val string) attribute.KeyValue { return ProcessRuntimeVersionKey.String(val) } +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + // Attributes for remote procedure calls. const ( // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the @@ -6528,6 +7009,45 @@ const ( // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It represents the mUST be calculated as two + // different counters starting from `1` one for sent messages and one for + // received message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. 
+ RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the + // "rpc.message.type" semantic conventions. It represents whether this + // is a received or sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + // RPCMethodKey is the attribute Key conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method // being called, must be equal to the $method part in the span name. @@ -6641,6 +7161,13 @@ var ( RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") @@ -6686,6 +7213,28 @@ func RPCJsonrpcVersion(val string) attribute.KeyValue { return RPCJsonrpcVersionKey.String(val) } +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. +func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the +// "rpc.message.id" semantic conventions. It MUST be calculated +// as two different counters starting from `1`, one for sent messages and one +// for received messages. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to +// the "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + // RPCMethod returns an attribute KeyValue conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method being // called, must be equal to the $method part in the span name. @@ -6815,9 +7364,9 @@ const ( // Note: MUST be the same for all instances of horizontally scaled // services. If the value was not specified, SDKs MUST fallback to // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. + // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If + // `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. ServiceNameKey = attribute.Key("service.name") // ServiceNamespaceKey is the attribute Key conforming to the @@ -6827,150 +7376,483 @@ const ( // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the + // signalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Describes System attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. 
It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path // // Type: string // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. 
It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state // - // Type: string + // Type: Enum // RequirementLevel: Optional // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type // - // Type: string + // Type: Enum // RequirementLevel: Optional // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") ) -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. 
-func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. It represents the filesystem +// mode +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) } -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to +// the "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) } -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. +// Describes Network attributes const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. + // SystemNetworkStateKey is the attribute Key conforming to the + // "system.network.state" semantic conventions. It represents a stateless + // protocol MUST NOT set this attribute // - // Type: string + // Type: Enum // RequirementLevel: Optional // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") + // Examples: 'close_wait' + SystemNetworkStateKey = attribute.Key("system.network.state") +) - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. 
It represents the source port number +var ( + // close + SystemNetworkStateClose = SystemNetworkStateKey.String("close") + // close_wait + SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") + // closing + SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") + // delete + SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") + // established + SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") + // fin_wait_1 + SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") + // fin_wait_2 + SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") + // last_ack + SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") + // listen + SystemNetworkStateListen = SystemNetworkStateKey.String("listen") + // syn_recv + SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") + // syn_sent + SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") + // time_wait + SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") +) + +// Describes System Process attributes +const ( + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State + // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) // - // Type: int + // Type: Enum // RequirementLevel: Optional // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") + // Examples: 'running' + SystemProcessStatusKey = attribute.Key("system.process.status") ) -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} +var ( + // running + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) // Attributes for telemetry SDK. const ( @@ -7861,6 +8743,17 @@ const ( // with no trailing period. URLSubdomainKey = attribute.Key("url.subdomain") + // URLTemplateKey is the attribute Key conforming to the "url.template" + // semantic conventions. It represents the low-cardinality template of an + // [absolute path + // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/users/{id}', '/users/:id', '/users?id={id}' + URLTemplateKey = attribute.Key("url.template") + // URLTopLevelDomainKey is the attribute Key conforming to the // "url.top_level_domain" semantic conventions. 
It represents the effective // top level domain (eTLD), also known as the domain suffix, is the last @@ -7956,6 +8849,14 @@ func URLSubdomain(val string) attribute.KeyValue { return URLSubdomainKey.String(val) } +// URLTemplate returns an attribute KeyValue conforming to the +// "url.template" semantic conventions. It represents the low-cardinality +// template of an [absolute path +// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + // URLTopLevelDomain returns an attribute KeyValue conforming to the // "url.top_level_domain" semantic conventions. It represents the effective top // level domain (eTLD), also known as the domain suffix, is the last part of @@ -8036,3 +8937,60 @@ func UserAgentOriginal(val string) attribute.KeyValue { func UserAgentVersion(val string) attribute.KeyValue { return UserAgentVersionKey.String(val) } + +// The attributes used to describe the packaged software running the +// application code. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go similarity index 96% rename from vendor/go.opentelemetry.io/otel/semconv/v1.25.0/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go index facbdf5f10..d031bbea78 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -4,6 +4,6 @@ // Package semconv implements OpenTelemetry semantic conventions. 
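The source.*, system.*, url.*, and webengine.* helpers above all follow the same pattern: a typed constructor (or a predefined enum member) that returns an attribute.KeyValue ready to be attached to telemetry. A minimal sketch of that usage, assuming an application with a configured global TracerProvider; the tracer name and attribute values are illustrative only.

package semconvusage

import (
    "context"

    "go.opentelemetry.io/otel"
    semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// annotatePacketSpan attaches a few of the v1.26.0 attributes defined above
// to a span: string and int constructors plus a predefined enum member.
func annotatePacketSpan(ctx context.Context) {
    _, span := otel.Tracer("example/packets").Start(ctx, "handle-packet")
    defer span.End()

    span.SetAttributes(
        semconv.SourceAddress("10.1.2.80"),              // source.address
        semconv.SourcePort(3389),                        // source.port
        semconv.SystemFilesystemMountpoint("/mnt/data"), // system.filesystem.mountpoint
        semconv.SystemCPUStateIdle,                      // system.cpu.state=idle (enum member)
    )
}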
// // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.25.0 +// patterns for OpenTelemetry things. This package represents the v1.26.0 // version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/semconv/v1.25.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go index e36ab1e342..bfaee0d56e 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" const ( // ExceptionEventName is the name of the Span event representing an exception. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go similarity index 88% rename from vendor/go.opentelemetry.io/otel/semconv/v1.25.0/metric.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go index 1c8b5bded5..fcdb9f4859 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -3,7 +3,7 @@ // Code generated from semantic convention specification. DO NOT EDIT. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" const ( @@ -44,98 +44,201 @@ const ( ContainerNetworkIoUnit = "By" ContainerNetworkIoDescription = "Network bytes for the container." - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It represents the number + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number // of connections that are currently in state described by the `state` // attribute. // Instrument: updowncounter // Unit: {connection} // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. 
+ // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionTimeoutsName = "db.client.connection.timeouts" + DBClientConnectionTimeoutsUnit = "{timeout}" + DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionCreateTime is the metric conforming to the + // "db.client.connection.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionCreateTimeName = "db.client.connection.create_time" + DBClientConnectionCreateTimeUnit = "s" + DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionWaitTime is the metric conforming to the + // "db.client.connection.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionWaitTimeName = "db.client.connection.wait_time" + DBClientConnectionWaitTimeUnit = "s" + DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionUseTime is the metric conforming to the + // "db.client.connection.use_time" semantic conventions. 
It represents the time + // between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionUseTimeName = "db.client.connection.use_time" + DBClientConnectionUseTimeUnit = "s" + DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the + // deprecated, use `db.client.connection.count` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental DBClientConnectionsUsageName = "db.client.connections.usage" DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute" + DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." // DBClientConnectionsIdleMax is the metric conforming to the // "db.client.connections.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. + // deprecated, use `db.client.connection.idle.max` instead. // Instrument: updowncounter // Unit: {connection} // Stability: Experimental DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed" + DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." // DBClientConnectionsIdleMin is the metric conforming to the // "db.client.connections.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. + // deprecated, use `db.client.connection.idle.min` instead. // Instrument: updowncounter // Unit: {connection} // Stability: Experimental DBClientConnectionsIdleMinName = "db.client.connections.idle.min" DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed" + DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It represents the maximum - // number of open connections allowed. + // "db.client.connections.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.max` instead. // Instrument: updowncounter // Unit: {connection} // Stability: Experimental DBClientConnectionsMaxName = "db.client.connections.max" DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "The maximum number of open connections allowed" + DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." // DBClientConnectionsPendingRequests is the metric conforming to the // "db.client.connections.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. + // the deprecated, use `db.client.connection.pending_requests` instead. 
// Instrument: updowncounter // Unit: {request} // Stability: Experimental DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." // DBClientConnectionsTimeouts is the metric conforming to the // "db.client.connections.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. + // deprecated, use `db.client.connection.timeouts` instead. // Instrument: counter // Unit: {timeout} // Stability: Experimental DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." // DBClientConnectionsCreateTime is the metric conforming to the // "db.client.connections.create_time" semantic conventions. It represents the - // time it took to create a new connection. + // deprecated, use `db.client.connection.create_time` instead. Note: the unit + // also changed from `ms` to `s`. // Instrument: histogram // Unit: ms // Stability: Experimental DBClientConnectionsCreateTimeName = "db.client.connections.create_time" DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection" + DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." // DBClientConnectionsWaitTime is the metric conforming to the // "db.client.connections.wait_time" semantic conventions. It represents the - // time it took to obtain an open connection from the pool. + // deprecated, use `db.client.connection.wait_time` instead. Note: the unit + // also changed from `ms` to `s`. // Instrument: histogram // Unit: ms // Stability: Experimental DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool" + DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." // DBClientConnectionsUseTime is the metric conforming to the // "db.client.connections.use_time" semantic conventions. It represents the - // time between borrowing a connection and returning it to the pool. + // deprecated, use `db.client.connection.use_time` instead. Note: the unit also + // changed from `ms` to `s`. // Instrument: histogram // Unit: ms // Stability: Experimental DBClientConnectionsUseTimeName = "db.client.connections.use_time" DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" // semantic conventions. 
It represents the measures the time taken to perform a @@ -1006,6 +1109,16 @@ const ( SystemMemoryLimitUnit = "By" SystemMemoryLimitDescription = "Total memory available in the system." + // SystemMemoryShared is the metric conforming to the "system.memory.shared" + // semantic conventions. It represents the shared memory used (mostly by + // tmpfs). + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemorySharedName = "system.memory.shared" + SystemMemorySharedUnit = "By" + SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." + // SystemMemoryUtilization is the metric conforming to the // "system.memory.utilization" semantic conventions. // Instrument: gauge diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go similarity index 85% rename from vendor/go.opentelemetry.io/otel/semconv/v1.25.0/schema.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go index 584431153a..4c87c7adcc 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -1,9 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.25.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index 5650a174b4..8c45a7107f 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -22,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) } -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly // as a remote SpanContext and as the current Span. The Span implementation // that wraps rsc is non-recording and performs no operations other than to // return rsc as the SpanContext from the SpanContext method. diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index d661c5d100..cdbf41d6d7 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -96,7 +96,7 @@ can embed the API interface directly. This option is not recommended. It will lead to publishing packages that contain runtime panics when users update to newer versions of -[go.opentelemetry.io/otel/trace], which may be done with a trasitive +[go.opentelemetry.io/otel/trace], which may be done with a transitive dependency. Finally, an author can embed another implementation in theirs. 
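Returning to the metric.go hunk above: each new db.client.connection.* entry bundles a name, unit, and description, while the old db.client.connections.* entries remain only as deprecation pointers. A rough sketch, not part of this change, of registering one of the new instruments through the otel metric API, assuming a configured global MeterProvider; the meter name is illustrative.

package dbpoolmetrics

import (
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/metric"
    semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// newConnectionCount builds an updowncounter from the non-deprecated
// db.client.connection.count name, unit, and description constants above.
func newConnectionCount() (metric.Int64UpDownCounter, error) {
    meter := otel.Meter("example/dbpool")
    return meter.Int64UpDownCounter(
        semconv.DBClientConnectionCountName,
        metric.WithUnit(semconv.DBClientConnectionCountUnit),
        metric.WithDescription(semconv.DBClientConnectionCountDescription),
    )
}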
The embedded diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go index 1dfa52c521..64a4f1b362 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -67,11 +67,13 @@ func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) span = Span{sc: sc} } else { // No parent, return a No-Op span with an empty span context. - span = Span{} + span = noopSpanInstance } return trace.ContextWithSpan(ctx, span), span } +var noopSpanInstance trace.Span = Span{} + // Span is an OpenTelemetry No-Op Span. type Span struct { embedded.Span diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 0000000000..ef85cb70c6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. 
+ // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 0000000000..d3aa476ee1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). 
The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. + SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided +// ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. 
+ SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 28877d4ab4..d49adf671b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" - "context" "encoding/hex" "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { Remote: sc.remote, }) } - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // AddLink adds a link. - // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. 
If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. 
- SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. 
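The declarations being deleted from trace.go here are the same Span, Link, SpanKind, Tracer, and TracerProvider definitions this update moves into the new provider.go and span.go files above and the tracer.go file that follows, so the public API is only relocated. For orientation, a small sketch of how that API is typically exercised; the tracer name and the injected work callback are illustrative, and a registered global TracerProvider is assumed.

package traceusage

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/codes"
    "go.opentelemetry.io/otel/trace"
)

// doWork starts a child span via the global TracerProvider, links it to
// another context, and records errors plus an explicit status before ending.
func doWork(ctx context.Context, linked context.Context, work func(context.Context) error) error {
    ctx, span := otel.Tracer("example/worker").Start(ctx, "do-work")
    defer span.End()

    // AddLink is available after creation; passing WithLinks to Start is
    // preferred when the linked context is already known.
    span.AddLink(trace.LinkFromContext(linked))

    if err := work(ctx); err != nil {
        span.RecordError(err)                    // records an exception event only
        span.SetStatus(codes.Error, err.Error()) // status must be set separately
        return err
    }
    span.SetStatus(codes.Ok, "")
    return nil
}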
-// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 0000000000..77952d2a0b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. 
This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 20b5cf2433..dc5e34cad0 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string { return "" } +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + // Insert adds a new list-member defined by the key/value pair to the // TraceState. If a list-member already exists for the given key, that // list-member's value is updated. The new or updated list-member is always diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh new file mode 100644 index 0000000000..c9b7cdbbfe --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +TARGET="${1:?Must provide target ref}" + +FILE="CHANGELOG.md" +TEMP_DIR=$(mktemp -d) +echo "Temp folder: $TEMP_DIR" + +# Only the latest commit of the feature branch is available +# automatically. To diff with the base branch, we need to +# fetch that too (and we only need its latest commit). +git fetch origin "${TARGET}" --depth=1 + +# Checkout the previous version on the base branch of the changelog to tmpfolder +git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE + +PREVIOUS_FILE="$TEMP_DIR/$FILE" +CURRENT_FILE="$FILE" +PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md" +CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md" + +# Extract released sections from the previous version +awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE" + +# Extract released sections from the current version +awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE" + +# Compare the released sections +if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then + echo "Error: The released sections of the changelog file have been modified." + diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE" + rm -rf "$TEMP_DIR" + false +fi + +rm -rf "$TEMP_DIR" +echo "The released sections remain unchanged." diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 102f2f508b..78b40f3ed2 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use.
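The Walk method added to TraceState in the tracestate.go hunk above iterates list-members in order and stops as soon as the callback returns false. A small usage sketch; the helper name and the member-count cutoff are illustrative, the point is the early-exit contract.

package tracestateusage

import "go.opentelemetry.io/otel/trace"

// firstMembers copies at most n list-members from ts, in order, using the
// new Walk method; returning false from the callback ends the iteration.
func firstMembers(ts trace.TraceState, n int) map[string]string {
    out := make(map[string]string, n)
    ts.Walk(func(k, v string) bool {
        if len(out) >= n {
            return false // stop walking once n members have been collected
        }
        out[k] = v
        return true // keep walking
    })
    return out
}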
func Version() string { - return "1.27.0" + return "1.30.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 60985f4362..0c32f4fc46 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.27.0 + version: v1.30.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -29,21 +29,21 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.49.0 + version: v0.52.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.3.0 + version: v0.6.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.8 + version: v0.0.9 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index 7109088370..d7099c35bc 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -58,6 +58,11 @@ const ( SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0 // Bits 0-7 are used for trace flags. SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK SpanFlags = 256 + SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK SpanFlags = 512 ) // Enum value maps for SpanFlags. @@ -65,10 +70,14 @@ var ( SpanFlags_name = map[int32]string{ 0: "SPAN_FLAGS_DO_NOT_USE", 255: "SPAN_FLAGS_TRACE_FLAGS_MASK", + 256: "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK", + 512: "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK", } SpanFlags_value = map[string]int32{ - "SPAN_FLAGS_DO_NOT_USE": 0, - "SPAN_FLAGS_TRACE_FLAGS_MASK": 255, + "SPAN_FLAGS_DO_NOT_USE": 0, + "SPAN_FLAGS_TRACE_FLAGS_MASK": 255, + "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK": 256, + "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK": 512, } ) @@ -463,20 +472,27 @@ type Span struct { // The `span_id` of this span's parent span. If this is a root span, then this // field must be empty. The ID is an 8-byte array. ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` - // Flags, a bit field. 8 least significant bits are the trace - // flags as defined in W3C Trace Context specification. Readers - // MUST not assume that 24 most significant bits will be zero. - // To read the 8-bit W3C trace flag, use `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. 
+ // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. // // When creating span messages, if the message is logically forwarded from another source // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD // be copied as-is. If creating from a source that does not have an equivalent flags field - // (such as a runtime representation of an OpenTelemetry span), the high 24 bits MUST + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. // // [Optional]. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"` // A description of the span's operation. // @@ -848,14 +864,23 @@ type Span_Link struct { // dropped_attributes_count is the number of dropped attributes. If the value is 0, // then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // Flags, a bit field. 8 least significant bits are the trace - // flags as defined in W3C Trace Context specification. Readers - // MUST not assume that 24 most significant bits will be zero. - // When creating new spans, the most-significant 24-bits MUST be - // zero. To read the 8-bit W3C trace flag (use flags & - // SPAN_FLAGS_TRACE_FLAGS_MASK). [Optional]. + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. // // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. 
Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"` } @@ -1073,19 +1098,24 @@ var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{ 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x2a, - 0x48, 0x0a, 0x09, 0x53, 0x70, 0x61, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x19, 0x0a, 0x15, - 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, - 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1b, 0x53, 0x50, 0x41, 0x4e, 0x5f, - 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, 0x41, 0x47, - 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x42, 0x77, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, - 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, - 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x9c, 0x01, 0x0a, 0x09, 0x53, 0x70, 0x61, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x19, 0x0a, + 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1b, 0x53, 0x50, 0x41, 0x4e, + 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, 0x41, + 0x47, 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x12, 0x2a, 0x0a, 0x25, 0x53, 0x50, + 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, + 0x5f, 0x48, 0x41, 0x53, 0x5f, 0x49, 0x53, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d, + 0x41, 0x53, 0x4b, 0x10, 0x80, 0x02, 0x12, 0x26, 0x0a, 0x21, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, + 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, 0x5f, 0x49, 0x53, 0x5f, + 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0x80, 0x04, 0x42, 0x77, + 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x54, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE 
index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/crypto/LICENSE +++ b/vendor/golang.org/x/crypto/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s index 6713accac0..c3895478ed 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -1,243 +1,2791 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blamka_amd64.go -out ../blamka_amd64.s -pkg argon2. DO NOT EDIT. //go:build amd64 && gc && !purego #include "textflag.h" -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFD $0xB1, v6, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - PSHUFB c40, v2; \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFB c48, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - MOVO v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v7, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - PSHUFB c40, v3; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFB c48, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - MOVO 
v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG_0(block, off) \ - MOVOU 8*(off+0)(block), X0; \ - MOVOU 8*(off+2)(block), X1; \ - MOVOU 8*(off+4)(block), X2; \ - MOVOU 8*(off+6)(block), X3; \ - MOVOU 8*(off+8)(block), X4; \ - MOVOU 8*(off+10)(block), X5; \ - MOVOU 8*(off+12)(block), X6; \ - MOVOU 8*(off+14)(block), X7 - -#define STORE_MSG_0(block, off) \ - MOVOU X0, 8*(off+0)(block); \ - MOVOU X1, 8*(off+2)(block); \ - MOVOU X2, 8*(off+4)(block); \ - MOVOU X3, 8*(off+6)(block); \ - MOVOU X4, 8*(off+8)(block); \ - MOVOU X5, 8*(off+10)(block); \ - MOVOU X6, 8*(off+12)(block); \ - MOVOU X7, 8*(off+14)(block) - -#define LOAD_MSG_1(block, off) \ - MOVOU 8*off+0*8(block), X0; \ - MOVOU 8*off+16*8(block), X1; \ - MOVOU 8*off+32*8(block), X2; \ - MOVOU 8*off+48*8(block), X3; \ - MOVOU 8*off+64*8(block), X4; \ - MOVOU 8*off+80*8(block), X5; \ - MOVOU 8*off+96*8(block), X6; \ - MOVOU 8*off+112*8(block), X7 - -#define STORE_MSG_1(block, off) \ - MOVOU X0, 8*off+0*8(block); \ - MOVOU X1, 8*off+16*8(block); \ - MOVOU X2, 8*off+32*8(block); \ - MOVOU X3, 8*off+48*8(block); \ - MOVOU X4, 8*off+64*8(block); \ - MOVOU X5, 8*off+80*8(block); \ - MOVOU X6, 8*off+96*8(block); \ - MOVOU X7, 8*off+112*8(block) - -#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ - LOAD_MSG_0(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_0(block, off) - -#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ - LOAD_MSG_1(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_1(block, off) - // func blamkaSSE4(b *block) -TEXT ·blamkaSSE4(SB), 4, $0-8 - MOVQ b+0(FP), AX - - MOVOU ·c40<>(SB), X10 - MOVOU ·c48<>(SB), X11 +// Requires: SSE2, SSSE3 +TEXT ·blamkaSSE4(SB), NOSPLIT, $0-8 + MOVQ b+0(FP), AX + MOVOU ·c40<>+0(SB), X10 + MOVOU ·c48<>+0(SB), X11 + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU 64(AX), X4 + MOVOU 80(AX), X5 + MOVOU 96(AX), X6 + MOVOU 112(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + 
PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, 32(AX) + MOVOU X3, 48(AX) + MOVOU X4, 64(AX) + MOVOU X5, 80(AX) + MOVOU X6, 96(AX) + MOVOU X7, 112(AX) + MOVOU 128(AX), X0 + MOVOU 144(AX), X1 + MOVOU 160(AX), X2 + MOVOU 176(AX), X3 + MOVOU 192(AX), X4 + MOVOU 208(AX), X5 + MOVOU 224(AX), X6 + MOVOU 240(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 
+ PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 128(AX) + MOVOU X1, 144(AX) + MOVOU X2, 160(AX) + MOVOU X3, 176(AX) + MOVOU X4, 192(AX) + MOVOU X5, 208(AX) + MOVOU X6, 224(AX) + MOVOU X7, 240(AX) + MOVOU 256(AX), X0 + MOVOU 272(AX), X1 + MOVOU 288(AX), X2 + MOVOU 304(AX), X3 + MOVOU 320(AX), X4 + MOVOU 336(AX), X5 + MOVOU 352(AX), X6 + MOVOU 368(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 256(AX) + MOVOU X1, 272(AX) + MOVOU X2, 288(AX) + MOVOU X3, 304(AX) + MOVOU X4, 320(AX) + MOVOU X5, 336(AX) + MOVOU X6, 352(AX) + MOVOU X7, 368(AX) + MOVOU 384(AX), X0 + MOVOU 400(AX), X1 + MOVOU 416(AX), X2 + MOVOU 432(AX), X3 + MOVOU 448(AX), X4 + MOVOU 464(AX), X5 + MOVOU 480(AX), X6 + MOVOU 496(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, 
X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 384(AX) + MOVOU X1, 400(AX) + MOVOU X2, 416(AX) + MOVOU X3, 432(AX) + MOVOU X4, 448(AX) + MOVOU X5, 464(AX) + MOVOU X6, 480(AX) + MOVOU X7, 496(AX) + MOVOU 512(AX), X0 + MOVOU 528(AX), X1 + MOVOU 544(AX), X2 + MOVOU 560(AX), X3 + MOVOU 576(AX), X4 + MOVOU 592(AX), X5 + MOVOU 608(AX), X6 + MOVOU 624(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + 
PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 512(AX) + MOVOU X1, 528(AX) + MOVOU X2, 544(AX) + MOVOU X3, 560(AX) + MOVOU X4, 576(AX) + MOVOU X5, 592(AX) + MOVOU X6, 608(AX) + MOVOU X7, 624(AX) + MOVOU 640(AX), X0 + MOVOU 656(AX), X1 + MOVOU 672(AX), X2 + MOVOU 688(AX), X3 + MOVOU 704(AX), X4 + MOVOU 720(AX), X5 + MOVOU 736(AX), X6 + MOVOU 752(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, 
X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 640(AX) + MOVOU X1, 656(AX) + MOVOU X2, 672(AX) + MOVOU X3, 688(AX) + MOVOU X4, 704(AX) + MOVOU X5, 720(AX) + MOVOU X6, 736(AX) + MOVOU X7, 752(AX) + MOVOU 768(AX), X0 + MOVOU 784(AX), X1 + MOVOU 800(AX), X2 + MOVOU 816(AX), X3 + MOVOU 832(AX), X4 + MOVOU 848(AX), X5 + MOVOU 864(AX), X6 + MOVOU 880(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 
768(AX) + MOVOU X1, 784(AX) + MOVOU X2, 800(AX) + MOVOU X3, 816(AX) + MOVOU X4, 832(AX) + MOVOU X5, 848(AX) + MOVOU X6, 864(AX) + MOVOU X7, 880(AX) + MOVOU 896(AX), X0 + MOVOU 912(AX), X1 + MOVOU 928(AX), X2 + MOVOU 944(AX), X3 + MOVOU 960(AX), X4 + MOVOU 976(AX), X5 + MOVOU 992(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 896(AX) + MOVOU X1, 912(AX) + MOVOU X2, 928(AX) + MOVOU X3, 944(AX) + MOVOU X4, 960(AX) + MOVOU X5, 976(AX) + MOVOU X6, 992(AX) + MOVOU X7, 1008(AX) + MOVOU (AX), X0 + MOVOU 128(AX), X1 + MOVOU 256(AX), X2 + MOVOU 384(AX), X3 + MOVOU 512(AX), X4 + MOVOU 640(AX), X5 + MOVOU 768(AX), X6 + MOVOU 896(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO 
X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 128(AX) + MOVOU X2, 256(AX) + MOVOU X3, 384(AX) + MOVOU X4, 512(AX) + MOVOU X5, 640(AX) + MOVOU X6, 768(AX) + MOVOU X7, 896(AX) + MOVOU 16(AX), X0 + MOVOU 144(AX), X1 + MOVOU 272(AX), X2 + MOVOU 400(AX), X3 + MOVOU 528(AX), X4 + MOVOU 656(AX), X5 + MOVOU 784(AX), X6 + MOVOU 912(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + 
PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 16(AX) + MOVOU X1, 144(AX) + MOVOU X2, 272(AX) + MOVOU X3, 400(AX) + MOVOU X4, 528(AX) + MOVOU X5, 656(AX) + MOVOU X6, 784(AX) + MOVOU X7, 912(AX) + MOVOU 32(AX), X0 + MOVOU 160(AX), X1 + MOVOU 288(AX), X2 + MOVOU 416(AX), X3 + MOVOU 544(AX), X4 + MOVOU 672(AX), X5 + MOVOU 800(AX), X6 + MOVOU 928(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ 
X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 32(AX) + MOVOU X1, 160(AX) + MOVOU X2, 288(AX) + MOVOU X3, 416(AX) + MOVOU X4, 544(AX) + MOVOU X5, 672(AX) + MOVOU X6, 800(AX) + MOVOU X7, 928(AX) + MOVOU 48(AX), X0 + MOVOU 176(AX), X1 + MOVOU 304(AX), X2 + MOVOU 432(AX), X3 + MOVOU 560(AX), X4 + MOVOU 688(AX), X5 + MOVOU 816(AX), X6 + MOVOU 944(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 48(AX) + MOVOU X1, 176(AX) + MOVOU X2, 304(AX) + MOVOU X3, 432(AX) + MOVOU X4, 560(AX) + MOVOU X5, 688(AX) + MOVOU X6, 816(AX) + MOVOU X7, 944(AX) + MOVOU 64(AX), X0 + MOVOU 192(AX), X1 + MOVOU 320(AX), X2 + MOVOU 448(AX), X3 + MOVOU 576(AX), X4 + MOVOU 704(AX), X5 + MOVOU 832(AX), X6 + MOVOU 960(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + 
MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 64(AX) + MOVOU X1, 192(AX) + MOVOU X2, 320(AX) + MOVOU X3, 448(AX) + MOVOU X4, 576(AX) + MOVOU X5, 704(AX) + MOVOU X6, 832(AX) + MOVOU X7, 960(AX) + MOVOU 80(AX), X0 + MOVOU 208(AX), X1 + MOVOU 336(AX), X2 + MOVOU 464(AX), X3 + MOVOU 592(AX), X4 + MOVOU 720(AX), X5 + MOVOU 848(AX), X6 + MOVOU 976(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + 
PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 80(AX) + MOVOU X1, 208(AX) + MOVOU X2, 336(AX) + MOVOU X3, 464(AX) + MOVOU X4, 592(AX) + MOVOU X5, 720(AX) + MOVOU X6, 848(AX) + MOVOU X7, 976(AX) + MOVOU 96(AX), X0 + MOVOU 224(AX), X1 + MOVOU 352(AX), X2 + MOVOU 480(AX), X3 + MOVOU 608(AX), X4 + MOVOU 736(AX), X5 + MOVOU 864(AX), X6 + MOVOU 992(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO 
X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 96(AX) + MOVOU X1, 224(AX) + MOVOU X2, 352(AX) + MOVOU X3, 480(AX) + MOVOU X4, 608(AX) + MOVOU X5, 736(AX) + MOVOU X6, 864(AX) + MOVOU X7, 992(AX) + MOVOU 112(AX), X0 + MOVOU 240(AX), X1 + MOVOU 368(AX), X2 + MOVOU 496(AX), X3 + MOVOU 624(AX), X4 + MOVOU 752(AX), X5 + MOVOU 880(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 112(AX) + MOVOU X1, 240(AX) + MOVOU X2, 368(AX) 
+ MOVOU X3, 496(AX) + MOVOU X4, 624(AX) + MOVOU X5, 752(AX) + MOVOU X6, 880(AX) + MOVOU X7, 1008(AX) + RET - BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 - BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) - RET +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 -// func mixBlocksSSE2(out, a, b, c *block) -TEXT ·mixBlocksSSE2(SB), 4, $0-32 +// func mixBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·mixBlocksSSE2(SB), NOSPLIT, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX MOVQ c+24(FP), CX - MOVQ $128, DI + MOVQ $0x00000080, DI loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 PXOR X1, X0 PXOR X2, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI JA loop RET -// func xorBlocksSSE2(out, a, b, c *block) -TEXT ·xorBlocksSSE2(SB), 4, $0-32 +// func xorBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·xorBlocksSSE2(SB), NOSPLIT, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX MOVQ c+24(FP), CX - MOVQ $128, DI + MOVQ $0x00000080, DI loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 - MOVOU 0(DX), X3 + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 + MOVOU (DX), X3 PXOR X1, X0 PXOR X2, X0 PXOR X3, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI JA loop RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 9ae8206c20..f75162e039 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -1,722 +1,4517 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blake2bAVX2_amd64_asm.go -out ../../blake2bAVX2_amd64.s -pkg blake2b. DO NOT EDIT. 
//go:build amd64 && gc && !purego #include "textflag.h" -DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 - -#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 -#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 -#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e -#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 -#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 - -#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ - VPADDQ m0, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m1, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y1_Y1; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y3_Y3; \ - VPADDQ m2, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m3, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y3_Y3; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y1_Y1 - -#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E -#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 -#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E -#define VMOVQ_SI_X14_0 
BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 -#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E - -#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n -#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n -#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n -#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n -#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n - -#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 -#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 -#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 -#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 -#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 - -#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 - -#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 -#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 - -// load msg: Y12 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y12, Y12 - -// load msg: Y13 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ - VMOVQ_SI_X13(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X13(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y13, Y13 - -// load msg: Y14 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ - VMOVQ_SI_X14(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X14(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y14, Y14 - -// load msg: Y15 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ - VMOVQ_SI_X15(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X15(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X11(6*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ - LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ - LOAD_MSG_AVX2_Y15(9, 11, 13, 15) - -#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ - LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ - LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ - VMOVQ_SI_X11(11*8); \ - VPSHUFD $0x4E, 0*8(SI), X14; \ - VPINSRQ_1_SI_X11(5*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(12, 2, 7, 3) - -#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ - VMOVQ_SI_X11(5*8); \ - VMOVDQU 11*8(SI), X12; \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - 
VMOVQ_SI_X13(8*8); \ - VMOVQ_SI_X11(2*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X11(13*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ - LOAD_MSG_AVX2_Y15(14, 6, 1, 4) - -#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ - LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ - LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ - LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ - VMOVQ_SI_X15(6*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X15(10*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ - LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X13(7*8); \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ - LOAD_MSG_AVX2_Y15(1, 12, 8, 13) - -#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ - LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ - LOAD_MSG_AVX2_Y15(13, 5, 14, 9) - -#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ - LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ - LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ - VMOVQ_SI_X14_0; \ - VPSHUFD $0x4E, 8*8(SI), X11; \ - VPINSRQ_1_SI_X14(6*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(7, 3, 2, 11) - -#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ - LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ - LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ - LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ - VMOVQ_SI_X15_0; \ - VMOVQ_SI_X11(6*8); \ - VPINSRQ_1_SI_X15(4*8); \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ - VMOVQ_SI_X12(6*8); \ - VMOVQ_SI_X11(11*8); \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ - VMOVQ_SI_X11(1*8); \ - VMOVDQU 12*8(SI), X14; \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - VMOVQ_SI_X15(2*8); \ - VMOVDQU 4*8(SI), X11; \ - VPINSRQ_1_SI_X15(7*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ - LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ - VMOVQ_SI_X13(2*8); \ - VPSHUFD $0x4E, 5*8(SI), X11; \ - VPINSRQ_1_SI_X13(4*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ - VMOVQ_SI_X15(11*8); \ - VMOVQ_SI_X11(12*8); \ - VPINSRQ_1_SI_X15(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y15, Y15 - // func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, DX - ADDQ $31, DX - ANDQ $~31, DX - - MOVQ CX, 16(DX) - XORQ CX, CX - MOVQ CX, 24(DX) - - VMOVDQU ·AVX2_c40<>(SB), Y4 - VMOVDQU ·AVX2_c48<>(SB), Y5 - - VMOVDQU 0(AX), Y8 +// Requires: AVX, AVX2 +TEXT ·hashBlocksAVX2(SB), NOSPLIT, $320-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, DX + ADDQ $+31, DX + ANDQ $-32, DX + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + VMOVDQU ·AVX2_c40<>+0(SB), Y4 + VMOVDQU ·AVX2_c48<>+0(SB), Y5 + VMOVDQU (AX), Y8 VMOVDQU 32(AX), Y9 - VMOVDQU ·AVX2_iv0<>(SB), Y6 - VMOVDQU ·AVX2_iv1<>(SB), Y7 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - MOVQ R9, 8(DX) + VMOVDQU 
·AVX2_iv0<>+0(SB), Y6 + VMOVDQU ·AVX2_iv1<>+0(SB), Y7 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) loop: - ADDQ $128, R8 - MOVQ R8, 0(DX) - CMPQ R8, $128 + ADDQ $0x80, R8 + MOVQ R8, (DX) + CMPQ R8, $0x80 JGE noinc INCQ R9 MOVQ R9, 8(DX) noinc: - VMOVDQA Y8, Y0 - VMOVDQA Y9, Y1 - VMOVDQA Y6, Y2 - VPXOR 0(DX), Y7, Y3 - - LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(DX) - VMOVDQA Y13, 64(DX) - VMOVDQA Y14, 96(DX) - VMOVDQA Y15, 128(DX) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(DX) - VMOVDQA Y13, 192(DX) - VMOVDQA Y14, 224(DX) - VMOVDQA Y15, 256(DX) - - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - - ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) - ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) - - VPXOR Y0, Y8, Y8 - VPXOR Y1, Y9, Y9 - VPXOR Y2, Y8, Y8 - VPXOR Y3, Y9, Y9 - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - VMOVDQU Y8, 0(AX) - VMOVDQU Y9, 32(AX) + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR (DX), Y7, Y3 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 
+ VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x48 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + VPSHUFD $0x4e, (SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x28 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + VMOVDQU 88(SI), X12 + BYTE $0xc4 + BYTE $0x63 + BYTE 
$0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE 
$0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE 
$0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + VPSHUFD $0x4e, 64(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE 
$0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x10 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + 
VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + VMOVDQU 96(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + VMOVDQU 32(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + VPSHUFD $0x4e, 40(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + 
BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 32(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 64(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 96(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 128(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 160(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 192(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 224(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 256(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE 
$0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VMOVDQU Y8, (AX) + VMOVDQU Y9, 32(AX) VZEROUPPER - RET -#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA -#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB -#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF -#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD -#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE - -#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF -#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF - -#define SHUFFLE_AVX() \ - VMOVDQA X6, X13; \ - VMOVDQA X2, X14; \ - VMOVDQA X4, X6; \ - VPUNPCKLQDQ_X13_X13_X15; \ - VMOVDQA X5, X4; \ - VMOVDQA X6, X5; \ - VPUNPCKHQDQ_X15_X7_X6; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X13_X7; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VPUNPCKHQDQ_X15_X2_X2; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X3_X3; \ - -#define SHUFFLE_AVX_INV() \ - VMOVDQA X2, X13; \ - VMOVDQA X4, X14; \ - VPUNPCKLQDQ_X2_X2_X15; \ - VMOVDQA X5, X4; \ - VPUNPCKHQDQ_X15_X3_X2; \ - VMOVDQA X14, X5; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VMOVDQA X6, X14; \ - VPUNPCKHQDQ_X15_X13_X3; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X6_X6; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X7_X7; \ - -#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - VPADDQ m0, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m1, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFD $-79, v6, v6; \ - VPSHUFD $-79, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPSHUFB c40, v2, v2; \ - VPSHUFB c40, v3, v3; \ - VPADDQ m2, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m3, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFB c48, v6, v6; \ - VPSHUFB c48, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPADDQ v2, v2, t0; \ - VPSRLQ $63, v2, v2; \ - VPXOR t0, v2, v2; \ - VPADDQ v3, v3, t0; \ - VPSRLQ $63, v3, v3; \ - VPXOR t0, v3, v3 - -// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) -// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 -#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X13(i2*8); \ - VMOVQ_SI_X14(i4*8); \ - VMOVQ_SI_X15(i6*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X13(i3*8); \ - VPINSRQ_1_SI_X14(i5*8); \ - VPINSRQ_1_SI_X15(i7*8) - -// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) -#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ - 
VMOVQ_SI_X12_0; \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(1*8); \ - VMOVQ_SI_X15(5*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X13(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(7*8) - -// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) -#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ - VPSHUFD $0x4E, 0*8(SI), X12; \ - VMOVQ_SI_X13(11*8); \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(7*8); \ - VPINSRQ_1_SI_X13(5*8); \ - VPINSRQ_1_SI_X14(2*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) -#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ - VMOVDQU 11*8(SI), X12; \ - VMOVQ_SI_X13(5*8); \ - VMOVQ_SI_X14(8*8); \ - VMOVQ_SI_X15(2*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14_0; \ - VPINSRQ_1_SI_X15(13*8) - -// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) -#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(6*8); \ - VMOVQ_SI_X15_0; \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(8*8) +DATA ·AVX2_c40<>+0(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+16(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+24(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) -#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ - VMOVQ_SI_X12(9*8); \ - VMOVQ_SI_X13(2*8); \ - VMOVQ_SI_X14_0; \ - VMOVQ_SI_X15(4*8); \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VPINSRQ_1_SI_X15(15*8) +DATA ·AVX2_c48<>+0(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+16(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+24(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) -#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(11*8); \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X13(8*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(3*8) +DATA ·AVX2_iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+8(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+16(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+24(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) -#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ - MOVQ 0*8(SI), X12; \ - VPSHUFD $0x4E, 8*8(SI), X13; \ - MOVQ 7*8(SI), X14; \ - MOVQ 2*8(SI), X15; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(11*8) - -// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) -#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ - MOVQ 6*8(SI), X12; \ - MOVQ 11*8(SI), X13; \ - MOVQ 15*8(SI), X14; \ - MOVQ 3*8(SI), X15; \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X14(9*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) -#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ - MOVQ 5*8(SI), X12; \ - MOVQ 8*8(SI), X13; \ - MOVQ 0*8(SI), X14; \ - MOVQ 6*8(SI), X15; \ - VPINSRQ_1_SI_X12(15*8); \ - VPINSRQ_1_SI_X13(2*8); \ - VPINSRQ_1_SI_X14(4*8); \ - VPINSRQ_1_SI_X15(10*8) - -// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) -#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ - VMOVDQU 12*8(SI), X12; \ - MOVQ 1*8(SI), X13; \ - MOVQ 2*8(SI), X14; \ - 
VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VMOVDQU 4*8(SI), X15 - -// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) -#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ - MOVQ 15*8(SI), X12; \ - MOVQ 3*8(SI), X13; \ - MOVQ 11*8(SI), X14; \ - MOVQ 12*8(SI), X15; \ - VPINSRQ_1_SI_X12(9*8); \ - VPINSRQ_1_SI_X13(13*8); \ - VPINSRQ_1_SI_X14(14*8); \ - VPINSRQ_1_SI_X15_0 +DATA ·AVX2_iv1<>+0(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+8(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+16(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+24(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), RODATA|NOPTR, $32 // func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - VMOVDQU ·AVX_c40<>(SB), X0 - VMOVDQU ·AVX_c48<>(SB), X1 +// Requires: AVX, SSE2 +TEXT ·hashBlocksAVX(SB), NOSPLIT, $288-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, R10 + ADDQ $0x0f, R10 + ANDQ $-16, R10 + VMOVDQU ·AVX_c40<>+0(SB), X0 + VMOVDQU ·AVX_c48<>+0(SB), X1 VMOVDQA X0, X8 VMOVDQA X1, X9 - - VMOVDQU ·AVX_iv3<>(SB), X0 - VMOVDQA X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) - - VMOVDQU 0(AX), X10 + VMOVDQU ·AVX_iv3<>+0(SB), X0 + VMOVDQA X0, (R10) + XORQ CX, (R10) + VMOVDQU (AX), X10 VMOVDQU 16(AX), X11 VMOVDQU 32(AX), X2 VMOVDQU 48(AX), X3 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 + MOVQ (BX), R8 + MOVQ 8(BX), R9 loop: - ADDQ $128, R8 - CMPQ R8, $128 + ADDQ $0x80, R8 + CMPQ R8, $0x80 JGE noinc INCQ R9 noinc: - VMOVQ_R8_X15 - VPINSRQ_1_R9_X15 - + BYTE $0xc4 + BYTE $0x41 + BYTE $0xf9 + BYTE $0x6e + BYTE $0xf8 + BYTE $0xc4 + BYTE $0x43 + BYTE $0x81 + BYTE $0x22 + BYTE $0xf9 + BYTE $0x01 VMOVDQA X10, X0 VMOVDQA X11, X1 - VMOVDQU ·AVX_iv0<>(SB), X4 - VMOVDQU ·AVX_iv1<>(SB), X5 - VMOVDQU ·AVX_iv2<>(SB), X6 - + VMOVDQU ·AVX_iv0<>+0(SB), X4 + VMOVDQU ·AVX_iv1<>+0(SB), X5 + VMOVDQU ·AVX_iv2<>+0(SB), X6 VPXOR X15, X6, X6 - VMOVDQA 0(R10), X7 - - LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA (R10), X7 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 VMOVDQA X12, 16(R10) VMOVDQA X13, 32(R10) VMOVDQA X14, 48(R10) VMOVDQA X15, 64(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 
+ VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 VMOVDQA X12, 80(R10) VMOVDQA X13, 96(R10) VMOVDQA X14, 112(R10) VMOVDQA X15, 128(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 VMOVDQA X12, 144(R10) VMOVDQA X13, 160(R10) VMOVDQA X14, 
176(R10) VMOVDQA X15, 192(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPSHUFD $0x4e, (SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 VMOVDQA X12, 208(R10) VMOVDQA X13, 224(R10) VMOVDQA X14, 240(R10) VMOVDQA X15, 256(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_11_12_5_15_8_0_2_13() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_2_5_4_15_6_10_0_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_9_5_2_10_0_7_4_15() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_2_6_0_8_12_10_11_3() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_0_6_9_8_7_3_2_11() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND_AVX(X0, X1, 
X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_5_15_8_2_0_4_6_10() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_6_14_11_0_15_9_3_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_12_13_1_10_2_7_4_5() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_15_9_3_13_11_14_12_0() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VMOVDQU 88(SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x36 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, 
X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + 
VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, 
X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, 
X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ 
X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ (SI), X12 + VPSHUFD $0x4e, 64(SI), X13 + MOVQ 56(SI), X14 + MOVQ 16(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + 
VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 40(SI), X12 + MOVQ 64(SI), X13 + MOVQ (SI), X14 + MOVQ 48(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + MOVQ 48(SI), X12 + MOVQ 88(SI), X13 + MOVQ 120(SI), X14 + MOVQ 24(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + 
VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VMOVDQU 96(SI), X12 + MOVQ 8(SI), X13 + MOVQ 16(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + VMOVDQU 32(SI), X15 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR 
X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 120(SI), X12 + MOVQ 24(SI), X13 + MOVQ 88(SI), X14 + MOVQ 96(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x3e + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 16(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 32(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 48(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 64(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE 
$0xdf + VPADDQ 80(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 96(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 112(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 128(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 144(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 160(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 176(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 192(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPADDQ 208(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 224(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 240(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 256(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + 
BYTE $0xc4
+	BYTE $0xc1
+	BYTE $0x49
+	BYTE $0x6d
+	BYTE $0xf7
+	BYTE $0xc4
+	BYTE $0x41
+	BYTE $0x09
+	BYTE $0x6c
+	BYTE $0xfe
+	BYTE $0xc4
+	BYTE $0xc1
+	BYTE $0x41
+	BYTE $0x6d
+	BYTE $0xff
 	VMOVDQU 32(AX), X14
 	VMOVDQU 48(AX), X15
 	VPXOR X0, X10, X10
@@ -729,16 +4524,36 @@ noinc:
 	VPXOR X7, X15, X3
 	VMOVDQU X2, 32(AX)
 	VMOVDQU X3, 48(AX)
+	LEAQ 128(SI), SI
+	SUBQ $0x80, DI
+	JNE loop
+	VMOVDQU X10, (AX)
+	VMOVDQU X11, 16(AX)
+	MOVQ R8, (BX)
+	MOVQ R9, 8(BX)
+	VZEROUPPER
+	RET
-	LEAQ 128(SI), SI
-	SUBQ $128, DI
-	JNE  loop
+DATA ·AVX_c40<>+0(SB)/8, $0x0201000706050403
+DATA ·AVX_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·AVX_c40<>(SB), RODATA|NOPTR, $16
-	VMOVDQU X10, 0(AX)
-	VMOVDQU X11, 16(AX)
+DATA ·AVX_c48<>+0(SB)/8, $0x0100070605040302
+DATA ·AVX_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·AVX_c48<>(SB), RODATA|NOPTR, $16
-	MOVQ R8, 0(BX)
-	MOVQ R9, 8(BX)
-	VZEROUPPER
+DATA ·AVX_iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·AVX_iv3<>+8(SB)/8, $0x5be0cd19137e2179
+GLOBL ·AVX_iv3<>(SB), RODATA|NOPTR, $16
-	RET
+DATA ·AVX_iv0<>+0(SB)/8, $0x6a09e667f3bcc908
+DATA ·AVX_iv0<>+8(SB)/8, $0xbb67ae8584caa73b
+GLOBL ·AVX_iv0<>(SB), RODATA|NOPTR, $16
+
+DATA ·AVX_iv1<>+0(SB)/8, $0x3c6ef372fe94f82b
+DATA ·AVX_iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·AVX_iv1<>(SB), RODATA|NOPTR, $16
+
+DATA ·AVX_iv2<>+0(SB)/8, $0x510e527fade682d1
+DATA ·AVX_iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f
+GLOBL ·AVX_iv2<>(SB), RODATA|NOPTR, $16
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
index adfac00c15..9a0ce21244 100644
--- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
@@ -1,278 +1,1441 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+// Code generated by command: go run blake2b_amd64_asm.go -out ../../blake2b_amd64.s -pkg blake2b. DO NOT EDIT.
 //go:build amd64 && gc && !purego
 
 #include "textflag.h"
 
-DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
-DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
-GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
-
-#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
-	MOVO v4, t1; \
-	MOVO v5, v4; \
-	MOVO t1, v5; \
-	MOVO v6, t1; \
-	PUNPCKLQDQ v6, t2; \
-	PUNPCKHQDQ v7, v6; \
-	PUNPCKHQDQ t2, v6; \
-	PUNPCKLQDQ v7, t2; \
-	MOVO t1, v7; \
-	MOVO v2, t1; \
-	PUNPCKHQDQ t2, v7; \
-	PUNPCKLQDQ v3, t2; \
-	PUNPCKHQDQ t2, v2; \
-	PUNPCKLQDQ t1, t2; \
-	PUNPCKHQDQ t2, v3
-
-#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
-	MOVO v4, t1; \
-	MOVO v5, v4; \
-	MOVO t1, v5; \
-	MOVO v2, t1; \
-	PUNPCKLQDQ v2, t2; \
-	PUNPCKHQDQ v3, v2; \
-	PUNPCKHQDQ t2, v2; \
-	PUNPCKLQDQ v3, t2; \
-	MOVO t1, v3; \
-	MOVO v6, t1; \
-	PUNPCKHQDQ t2, v3; \
-	PUNPCKLQDQ v7, t2; \
-	PUNPCKHQDQ t2, v6; \
-	PUNPCKLQDQ t1, t2; \
-	PUNPCKHQDQ t2, v7
-
-#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
-	PADDQ m0, v0; \
-	PADDQ m1, v1; \
-	PADDQ v2, v0; \
-	PADDQ v3, v1; \
-	PXOR v0, v6; \
-	PXOR v1, v7; \
-	PSHUFD $0xB1, v6, v6; \
-	PSHUFD $0xB1, v7, v7; \
-	PADDQ v6, v4; \
-	PADDQ v7, v5; \
-	PXOR v4, v2; \
-	PXOR v5, v3; \
-	PSHUFB c40, v2; \
-	PSHUFB c40, v3; \
-	PADDQ m2, v0; \
-	PADDQ m3, v1; \
-	PADDQ v2, v0; \
-	PADDQ v3, v1; \
-	PXOR v0, v6; \
-	PXOR v1, v7; \
-	PSHUFB c48, v6; \
-	PSHUFB c48, v7; \
-	PADDQ v6, v4; \
-	PADDQ v7, v5; \
-	PXOR v4, v2; \
-	PXOR v5, v3; \
-	MOVOU v2, t0; \
-	PADDQ v2, t0; \
-	PSRLQ $63, v2; \
-	PXOR t0, v2; \
-	MOVOU v3, t0; \
-	PADDQ v3, t0; \
-	PSRLQ $63, v3; \
-	PXOR t0, v3
-
-#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
-	MOVQ i0*8(src), m0; \
-	PINSRQ $1, i1*8(src), m0; \
-	MOVQ i2*8(src), m1; \
-	PINSRQ $1, i3*8(src), m1; \
-	MOVQ i4*8(src), m2; \
-	PINSRQ $1, i5*8(src), m2; \
-	MOVQ i6*8(src), m3; \
-	PINSRQ $1, i7*8(src), m3
-
 // func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
-	MOVQ h+0(FP), AX
-	MOVQ c+8(FP), BX
-	MOVQ flag+16(FP), CX
-	MOVQ blocks_base+24(FP), SI
-	MOVQ blocks_len+32(FP), DI
-
-	MOVQ SP, R10
-	ADDQ $15, R10
-	ANDQ $~15, R10
-
-	MOVOU ·iv3<>(SB), X0
-	MOVO X0, 0(R10)
-	XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0)
-
-	MOVOU ·c40<>(SB), X13
-	MOVOU ·c48<>(SB), X14
-
-	MOVOU 0(AX), X12
+// Requires: SSE2, SSE4.1, SSSE3
+TEXT ·hashBlocksSSE4(SB), NOSPLIT, $288-48
+	MOVQ h+0(FP), AX
+	MOVQ c+8(FP), BX
+	MOVQ flag+16(FP), CX
+	MOVQ blocks_base+24(FP), SI
+	MOVQ blocks_len+32(FP), DI
+	MOVQ SP, R10
+	ADDQ $0x0f, R10
+	ANDQ $-16, R10
+	MOVOU ·iv3<>+0(SB), X0
+	MOVO X0, (R10)
+	XORQ CX, (R10)
+	MOVOU ·c40<>+0(SB), X13
+	MOVOU ·c48<>+0(SB), X14
+	MOVOU (AX), X12
 	MOVOU 16(AX), X15
-
-	MOVQ 0(BX), R8
-	MOVQ 8(BX), R9
+	MOVQ (BX), R8
+	MOVQ 8(BX), R9
 
 loop:
-	ADDQ $128, R8
-	CMPQ R8, $128
+
ADDQ $0x80, R8 + CMPQ R8, $0x80 JGE noinc INCQ R9 noinc: - MOVQ R8, X8 - PINSRQ $1, R9, X8 - - MOVO X12, X0 - MOVO X15, X1 - MOVOU 32(AX), X2 - MOVOU 48(AX), X3 - MOVOU ·iv0<>(SB), X4 - MOVOU ·iv1<>(SB), X5 - MOVOU ·iv2<>(SB), X6 - - PXOR X8, X6 - MOVO 0(R10), X7 - - LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) - MOVO X8, 16(R10) - MOVO X9, 32(R10) - MOVO X10, 48(R10) - MOVO X11, 64(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) - MOVO X8, 80(R10) - MOVO X9, 96(R10) - MOVO X10, 112(R10) - MOVO X11, 128(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) - MOVO X8, 144(R10) - MOVO X9, 160(R10) - MOVO X10, 176(R10) - MOVO X11, 192(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) - MOVO X8, 208(R10) - MOVO X9, 224(R10) - MOVO X10, 240(R10) - MOVO X11, 256(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 6, 
14, 11, 0, 15, 9, 3, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + MOVQ R8, X8 + PINSRQ $0x01, R9, X8 + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>+0(SB), X4 + MOVOU ·iv1<>+0(SB), X5 + MOVOU ·iv2<>+0(SB), X6 + PXOR X8, X6 + MOVO (R10), X7 + MOVQ (SI), X8 + PINSRQ $0x01, 16(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 48(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 40(SI), X11 + PINSRQ $0x01, 56(SI), X11 + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 64(SI), X8 + PINSRQ $0x01, 80(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 112(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 88(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 120(SI), X11 + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 112(SI), X8 + PINSRQ $0x01, 32(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 80(SI), X10 + PINSRQ $0x01, 64(SI), X10 + MOVQ 120(SI), X11 + PINSRQ $0x01, 48(SI), X11 + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO 
X10, 176(R10) + MOVO X11, 192(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 8(SI), X8 + PINSRQ $0x01, (SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, 40(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 16(SI), X10 + MOVQ 56(SI), X11 + PINSRQ $0x01, 24(SI), X11 + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 88(SI), X8 + PINSRQ $0x01, 96(SI), X8 + MOVQ 40(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 64(SI), X10 + PINSRQ $0x01, (SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 80(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 72(SI), X9 + MOVQ 112(SI), X10 + PINSRQ $0x01, 48(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 32(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU 
X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 56(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 104(SI), X9 + PINSRQ $0x01, 88(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 8(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, 112(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 16(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 48(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ (SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 72(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 16(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 120(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 112(SI), X8 + PINSRQ $0x01, 88(SI), X8 + MOVQ 48(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 96(SI), X10 + MOVQ 64(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + 
PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 16(SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ (SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ 88(SI), X11 + PINSRQ $0x01, 24(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 32(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 120(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 104(SI), X10 + PINSRQ $0x01, 40(SI), X10 + MOVQ 112(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 96(SI), X8 + PINSRQ $0x01, 8(SI), X8 + MOVQ 112(SI), X9 + PINSRQ $0x01, 32(SI), X9 + MOVQ 40(SI), X10 + PINSRQ $0x01, 120(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, 
X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ (SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 56(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 88(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 104(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 40(SI), X8 + PINSRQ $0x01, 120(SI), X8 + MOVQ 64(SI), X9 + PINSRQ $0x01, 16(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 48(SI), X8 + PINSRQ $0x01, 112(SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, (SI), X9 + MOVQ 120(SI), X10 + PINSRQ $0x01, 72(SI), X10 + MOVQ 24(SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR 
X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 96(SI), X8 + PINSRQ $0x01, 104(SI), X8 + MOVQ 8(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 80(SI), X8 + PINSRQ $0x01, 64(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 120(SI), X8 + PINSRQ $0x01, 72(SI), X8 + MOVQ 24(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, (SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 16(R10), X0 + PADDQ 32(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + 
PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 48(R10), X0 + PADDQ 64(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 80(R10), X0 + PADDQ 96(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 112(R10), X0 + PADDQ 128(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 144(R10), X0 + PADDQ 160(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 176(R10), X0 + PADDQ 192(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 208(R10), X0 + PADDQ 224(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 240(R10), X0 + PADDQ 256(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVOU X12, (AX) + MOVOU X15, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + RET - HALF_ROUND(X0, X1, X2, 
X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+DATA ·iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·iv3<>+8(SB)/8, $0x5be0cd19137e2179
+GLOBL ·iv3<>(SB), RODATA|NOPTR, $16
-	MOVOU X2, 32(AX)
-	MOVOU 32(AX), X10
-	MOVOU 48(AX), X11
-	PXOR X0, X12
-	PXOR X1, X15
-	PXOR X2, X10
-	PXOR X3, X11
-	PXOR X4, X12
-	PXOR X5, X15
-	PXOR X6, X10
-	PXOR X7, X11
-	MOVOU X10, 32(AX)
-	MOVOU X11, 48(AX)
+DATA ·c40<>+0(SB)/8, $0x0201000706050403
+DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·c40<>(SB), RODATA|NOPTR, $16
-	LEAQ 128(SI), SI
-	SUBQ $128, DI
-	JNE loop
+DATA ·c48<>+0(SB)/8, $0x0100070605040302
+DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·c48<>(SB), RODATA|NOPTR, $16
-	MOVOU X12, 0(AX)
-	MOVOU X15, 16(AX)
+DATA ·iv0<>+0(SB)/8, $0x6a09e667f3bcc908
+DATA ·iv0<>+8(SB)/8, $0xbb67ae8584caa73b
+GLOBL ·iv0<>(SB), RODATA|NOPTR, $16
-	MOVQ R8, 0(BX)
-	MOVQ R9, 8(BX)
+DATA ·iv1<>+0(SB)/8, $0x3c6ef372fe94f82b
+DATA ·iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·iv1<>(SB), RODATA|NOPTR, $16
-	RET
+DATA ·iv2<>+0(SB)/8, $0x510e527fade682d1
+DATA ·iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f
+GLOBL ·iv2<>(SB), RODATA|NOPTR, $16
diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go
index 425e8eecb0..016e90215c 100644
--- a/vendor/golang.org/x/crypto/cast5/cast5.go
+++ b/vendor/golang.org/x/crypto/cast5/cast5.go
@@ -11,7 +11,7 @@
 // Deprecated: any new system should use AES (from crypto/aes, if necessary in
 // an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
 // golang.org/x/crypto/chacha20poly1305).
-package cast5 // import "golang.org/x/crypto/cast5"
+package cast5
 
 import (
 	"errors"
diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
index cda8e3edfd..90ef6a241d 100644
--- a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
+++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
@@ -4,7 +4,7 @@
 
 // Package asn1 contains supporting types for parsing and building ASN.1
 // messages with the cryptobyte package.
-package asn1 // import "golang.org/x/crypto/cryptobyte/asn1"
+package asn1
 
 // Tag represents an ASN.1 identifier octet, consisting of a tag number
 // (indicating a type) and class (such as context-specific or constructed).
diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go
index 10692a8a31..4b0f8097f9 100644
--- a/vendor/golang.org/x/crypto/cryptobyte/string.go
+++ b/vendor/golang.org/x/crypto/cryptobyte/string.go
@@ -15,7 +15,7 @@
 //
 // See the documentation and examples for the Builder and String types to get
 // started.
-package cryptobyte // import "golang.org/x/crypto/cryptobyte"
+package cryptobyte
 
 // String represents a string of bytes. It provides methods for parsing
 // fixed-length and length-prefixed values from it.
diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go
index f4ded5fee2..3bee66294e 100644
--- a/vendor/golang.org/x/crypto/hkdf/hkdf.go
+++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go
@@ -8,7 +8,7 @@
 // HKDF is a cryptographic key derivation function (KDF) with the goal of
 // expanding limited input keying material into one or more cryptographically
 // strong secret keys.
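Editor's sketch for the hkdf hunk whose doc comment ends just above: a minimal example of the package's exported API. It is not part of this diff, and the secret, salt, and info values are made up for illustration.

	package main

	import (
		"crypto/sha256"
		"fmt"
		"io"

		"golang.org/x/crypto/hkdf"
	)

	func main() {
		secret := []byte("input keying material") // illustrative only
		salt := []byte("optional salt")           // illustrative only
		info := []byte("context label")           // illustrative only

		// hkdf.New returns an io.Reader that yields derived key bytes.
		r := hkdf.New(sha256.New, secret, salt, info)

		key := make([]byte, 32)
		if _, err := io.ReadFull(r, key); err != nil {
			panic(err)
		}
		fmt.Printf("derived %d-byte key\n", len(key))
	}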
-package hkdf // import "golang.org/x/crypto/hkdf" +package hkdf import ( "crypto/hmac" diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s index e0d3c64756..133757384b 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -1,108 +1,93 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run sum_amd64_asm.go -out ../sum_amd64.s -pkg poly1305. DO NOT EDIT. //go:build gc && !purego -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -// func update(state *[7]uint64, msg []byte) +// func update(state *macState, msg []byte) TEXT ·update(SB), $0-32 MOVQ state+0(FP), DI MOVQ msg_base+8(FP), SI MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 + MOVQ (DI), R8 + MOVQ 8(DI), R9 + MOVQ 16(DI), R10 + MOVQ 24(DI), R11 + MOVQ 32(DI), R12 + CMPQ R15, $0x10 JB bytes_between_0_and_15 loop: - POLY1305_ADD(SI, R8, R9, R10) + ADDQ (SI), R8 + ADCQ 8(SI), R9 + ADCQ $0x01, R10 + LEAQ 16(SI), SI multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop + MOVQ R11, AX + MULQ R8 + MOVQ AX, BX + MOVQ DX, CX + MOVQ R11, AX + MULQ R9 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ R11, R13 + IMULQ R10, R13 + ADDQ DX, R13 + MOVQ R12, AX + MULQ R8 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ DX, R8 + MOVQ R12, R14 + IMULQ R10, R14 + MOVQ R12, AX + MULQ R9 + ADDQ AX, R13 + ADCQ DX, R14 + ADDQ R8, R13 + ADCQ $0x00, R14 + MOVQ BX, R8 + MOVQ CX, R9 + MOVQ R13, R10 + ANDQ $0x03, R10 + MOVQ R13, BX + ANDQ $-4, BX + ADDQ BX, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SHRQ $0x02, R14, R13 + SHRQ $0x02, R14 + ADDQ R13, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SUBQ $0x10, R15 + CMPQ R15, $0x10 + JAE loop bytes_between_0_and_15: TESTQ R15, R15 JZ done - MOVQ $1, BX + MOVQ $0x00000001, BX XORQ CX, CX XORQ R13, R13 ADDQ R15, SI flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX + SHLQ $0x08, BX, CX + SHLQ $0x08, BX MOVB -1(SI), R13 XORQ R13, BX DECQ SI DECQ R15 JNZ flush_buffer - ADDQ BX, R8 ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 + ADCQ $0x00, R10 + MOVQ $0x00000010, R15 JMP multiply done: - MOVQ R8, 0(DI) + MOVQ R8, (DI) MOVQ R9, 8(DI) MOVQ R10, 16(DI) RET diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go index f3c3242a04..1fe600ad03 100644 --- a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go +++ 
b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
@@ -32,7 +32,7 @@ chunk size.
 
 This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
 */
-package secretbox // import "golang.org/x/crypto/nacl/secretbox"
+package secretbox
 
 import (
 	"golang.org/x/crypto/internal/alias"
diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go
index 8907183ec0..e664d127cb 100644
--- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go
+++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go
@@ -10,14 +10,15 @@
 // for their specific task. If you are required to interoperate with OpenPGP
 // systems and need a maintained package, consider a community fork.
 // See https://golang.org/issue/44226.
-package armor // import "golang.org/x/crypto/openpgp/armor"
+package armor
 
 import (
 	"bufio"
 	"bytes"
 	"encoding/base64"
-	"golang.org/x/crypto/openpgp/errors"
 	"io"
+
+	"golang.org/x/crypto/openpgp/errors"
 )
 
 // A Block represents an OpenPGP armored structure.
diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
index 743b35a120..f922bdbcaa 100644
--- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
+++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
@@ -16,7 +16,7 @@
 // https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has
 // compatibility and security issues (see https://eprint.iacr.org/2021/923).
 // Moreover, this package doesn't protect against side-channel attacks.
-package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
+package elgamal
 
 import (
 	"crypto/rand"
diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
index 1d7a0ea05a..a328749471 100644
--- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go
+++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
@@ -9,7 +9,7 @@
 // for their specific task. If you are required to interoperate with OpenPGP
 // systems and need a maintained package, consider a community fork.
 // See https://golang.org/issue/44226.
-package errors // import "golang.org/x/crypto/openpgp/errors"
+package errors
 
 import (
 	"strconv"
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
index 0a19794a8e..a84a1a214e 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
@@ -10,7 +10,7 @@
 // for their specific task. If you are required to interoperate with OpenPGP
 // systems and need a maintained package, consider a community fork.
 // See https://golang.org/issue/44226.
-package packet // import "golang.org/x/crypto/openpgp/packet"
+package packet
 
 import (
 	"bufio"
diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go
index 48a8931468..cff3db9196 100644
--- a/vendor/golang.org/x/crypto/openpgp/read.go
+++ b/vendor/golang.org/x/crypto/openpgp/read.go
@@ -9,7 +9,7 @@
 // for their specific task. If you are required to interoperate with OpenPGP
 // systems and need a maintained package, consider a community fork.
 // See https://golang.org/issue/44226.
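Editor's sketch for the nacl/secretbox hunk near the top of this span: the change only touches the package clause, but since the doc comment describes authenticated sealing, here is a brief illustration of Seal and Open. It is not part of this diff; the key, nonce, and message are placeholders.

	package main

	import (
		"crypto/rand"
		"fmt"
		"io"

		"golang.org/x/crypto/nacl/secretbox"
	)

	func main() {
		var key [32]byte
		var nonce [24]byte
		if _, err := io.ReadFull(rand.Reader, key[:]); err != nil {
			panic(err)
		}
		if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
			panic(err)
		}

		msg := []byte("attack at dawn") // illustrative plaintext
		// Seal appends the ciphertext and Poly1305 tag to its first argument,
		// here the nonce, so the sealed box is self-describing.
		box := secretbox.Seal(nonce[:], msg, &nonce, &key)

		// Open verifies the tag and decrypts; the nonce was prepended above.
		plain, ok := secretbox.Open(nil, box[24:], &nonce, &key)
		fmt.Println(ok, string(plain))
	}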
-package openpgp // import "golang.org/x/crypto/openpgp"
+package openpgp
 
 import (
 	"crypto"
diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
index f53244a1c7..fa1a919079 100644
--- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
+++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
@@ -10,7 +10,7 @@
 // for their specific task. If you are required to interoperate with OpenPGP
 // systems and need a maintained package, consider a community fork.
 // See https://golang.org/issue/44226.
-package s2k // import "golang.org/x/crypto/openpgp/s2k"
+package s2k
 
 import (
 	"crypto"
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
index 3fd05b2751..3685b34458 100644
--- a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
@@ -3,7 +3,7 @@
 // license that can be found in the LICENSE file.
 
 // Package salsa provides low-level access to functions in the Salsa family.
-package salsa // import "golang.org/x/crypto/salsa20/salsa"
+package salsa
 
 import "math/bits"
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
index fcce0234b6..3883e0ec22 100644
--- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
@@ -1,880 +1,880 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+// Code generated by command: go run salsa20_amd64_asm.go -out ../salsa20_amd64.s -pkg salsa. DO NOT EDIT.
 
 //go:build amd64 && !purego && gc
 
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
+// func salsa2020XORKeyStream(out *byte, in *byte, n uint64, nonce *byte, key *byte)
+// Requires: SSE2
+TEXT ·salsa2020XORKeyStream(SB), $456-40
+	// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size.
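Editor's sketch: the generated salsa2020XORKeyStream routine whose body continues below appears, from the vendored sources, to back the exported salsa.XORKeyStream wrapper on amd64. A short hypothetical usage example, not part of this diff, with made-up key, nonce, and plaintext:

	package main

	import (
		"fmt"

		"golang.org/x/crypto/salsa20/salsa"
	)

	func main() {
		var key [32]byte                // placeholder key
		var counter [16]byte            // first 8 bytes: nonce, last 8: block counter
		copy(counter[:8], "8bytenon")   // placeholder nonce

		msg := []byte("hello, salsa20") // illustrative plaintext
		out := make([]byte, len(msg))
		// XORKeyStream XORs msg with the Salsa20/20 keystream into out.
		salsa.XORKeyStream(out, msg, &counter, &key)
		fmt.Printf("%x\n", out)
	}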
+ MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + MOVQ n+16(FP), DX + MOVQ nonce+24(FP), CX + MOVQ key+32(FP), R8 + MOVQ SP, R12 + ADDQ $0x1f, R12 + ANDQ $-32, R12 + MOVQ DX, R9 + MOVQ CX, DX + MOVQ R8, R10 + CMPQ R9, $0x00 + JBE DONE + MOVL 20(R10), CX + MOVL (R10), R8 + MOVL (DX), AX + MOVL 16(R10), R11 + MOVL CX, (R12) + MOVL R8, 4(R12) + MOVL AX, 8(R12) + MOVL R11, 12(R12) + MOVL 8(DX), CX + MOVL 24(R10), R8 + MOVL 4(R10), AX + MOVL 4(DX), R11 + MOVL CX, 16(R12) + MOVL R8, 20(R12) + MOVL AX, 24(R12) + MOVL R11, 28(R12) + MOVL 12(DX), CX + MOVL 12(R10), DX + MOVL 28(R10), R8 + MOVL 8(R10), AX + MOVL DX, 32(R12) + MOVL CX, 36(R12) + MOVL R8, 40(R12) + MOVL AX, 44(R12) + MOVQ $0x61707865, DX + MOVQ $0x3320646e, CX + MOVQ $0x79622d32, R8 + MOVQ $0x6b206574, AX + MOVL DX, 48(R12) + MOVL CX, 52(R12) + MOVL R8, 56(R12) + MOVL AX, 60(R12) + CMPQ R9, $0x00000100 + JB BYTESBETWEEN1AND255 + MOVOA 48(R12), X0 + PSHUFL $0x55, X0, X1 + PSHUFL $0xaa, X0, X2 + PSHUFL $0xff, X0, X3 + PSHUFL $0x00, X0, X0 + MOVOA X1, 64(R12) + MOVOA X2, 80(R12) + MOVOA X3, 96(R12) + MOVOA X0, 112(R12) + MOVOA (R12), X0 + PSHUFL $0xaa, X0, X1 + PSHUFL $0xff, X0, X2 + PSHUFL $0x00, X0, X3 + PSHUFL $0x55, X0, X0 + MOVOA X1, 128(R12) + MOVOA X2, 144(R12) + MOVOA X3, 160(R12) + MOVOA X0, 176(R12) + MOVOA 16(R12), X0 + PSHUFL $0xff, X0, X1 + PSHUFL $0x55, X0, X2 + PSHUFL $0xaa, X0, X0 + MOVOA X1, 192(R12) + MOVOA X2, 208(R12) + MOVOA X0, 224(R12) + MOVOA 32(R12), X0 + PSHUFL $0x00, X0, X1 + PSHUFL $0xaa, X0, X2 + PSHUFL $0xff, X0, X0 + MOVOA X1, 240(R12) + MOVOA X2, 256(R12) + MOVOA X0, 272(R12) -// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) -// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size. -TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment - MOVQ out+0(FP),DI - MOVQ in+8(FP),SI - MOVQ n+16(FP),DX - MOVQ nonce+24(FP),CX - MOVQ key+32(FP),R8 +BYTESATLEAST256: + MOVL 16(R12), DX + MOVL 36(R12), CX + MOVL DX, 288(R12) + MOVL CX, 304(R12) + SHLQ $0x20, CX + ADDQ CX, DX + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 292(R12) + MOVL CX, 308(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 296(R12) + MOVL CX, 312(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 300(R12) + MOVL CX, 316(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 16(R12) + MOVL CX, 36(R12) + MOVQ R9, 352(R12) + MOVQ $0x00000014, DX + MOVOA 64(R12), X0 + MOVOA 80(R12), X1 + MOVOA 96(R12), X2 + MOVOA 256(R12), X3 + MOVOA 272(R12), X4 + MOVOA 128(R12), X5 + MOVOA 144(R12), X6 + MOVOA 176(R12), X7 + MOVOA 192(R12), X8 + MOVOA 208(R12), X9 + MOVOA 224(R12), X10 + MOVOA 304(R12), X11 + MOVOA 112(R12), X12 + MOVOA 160(R12), X13 + MOVOA 240(R12), X14 + MOVOA 288(R12), X15 - MOVQ SP,R12 - ADDQ $31, R12 - ANDQ $~31, R12 +MAINLOOP1: + MOVOA X1, 320(R12) + MOVOA X2, 336(R12) + MOVOA X13, X1 + PADDL X12, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, X14 + PSRLL $0x19, X2 + PXOR X2, X14 + MOVOA X7, X1 + PADDL X0, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, X11 + PSRLL $0x19, X2 + PXOR X2, X11 + MOVOA X12, X1 + PADDL X14, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X15 + PSRLL $0x17, X2 + PXOR X2, X15 + MOVOA X0, X1 + PADDL X11, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X9 + PSRLL $0x17, X2 + PXOR X2, X9 + MOVOA X14, X1 + PADDL X15, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X13 + PSRLL $0x13, X2 + PXOR X2, X13 + MOVOA X11, X1 + PADDL X9, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X7 + PSRLL $0x13, X2 + PXOR X2, X7 + MOVOA X15, 
X1 + PADDL X13, X1 + MOVOA X1, X2 + PSLLL $0x12, X1 + PXOR X1, X12 + PSRLL $0x0e, X2 + PXOR X2, X12 + MOVOA 320(R12), X1 + MOVOA X12, 320(R12) + MOVOA X9, X2 + PADDL X7, X2 + MOVOA X2, X12 + PSLLL $0x12, X2 + PXOR X2, X0 + PSRLL $0x0e, X12 + PXOR X12, X0 + MOVOA X5, X2 + PADDL X1, X2 + MOVOA X2, X12 + PSLLL $0x07, X2 + PXOR X2, X3 + PSRLL $0x19, X12 + PXOR X12, X3 + MOVOA 336(R12), X2 + MOVOA X0, 336(R12) + MOVOA X6, X0 + PADDL X2, X0 + MOVOA X0, X12 + PSLLL $0x07, X0 + PXOR X0, X4 + PSRLL $0x19, X12 + PXOR X12, X4 + MOVOA X1, X0 + PADDL X3, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X10 + PSRLL $0x17, X12 + PXOR X12, X10 + MOVOA X2, X0 + PADDL X4, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X8 + PSRLL $0x17, X12 + PXOR X12, X8 + MOVOA X3, X0 + PADDL X10, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X5 + PSRLL $0x13, X12 + PXOR X12, X5 + MOVOA X4, X0 + PADDL X8, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X6 + PSRLL $0x13, X12 + PXOR X12, X6 + MOVOA X10, X0 + PADDL X5, X0 + MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X1 + PSRLL $0x0e, X12 + PXOR X12, X1 + MOVOA 320(R12), X0 + MOVOA X1, 320(R12) + MOVOA X4, X1 + PADDL X0, X1 + MOVOA X1, X12 + PSLLL $0x07, X1 + PXOR X1, X7 + PSRLL $0x19, X12 + PXOR X12, X7 + MOVOA X8, X1 + PADDL X6, X1 + MOVOA X1, X12 + PSLLL $0x12, X1 + PXOR X1, X2 + PSRLL $0x0e, X12 + PXOR X12, X2 + MOVOA 336(R12), X12 + MOVOA X2, 336(R12) + MOVOA X14, X1 + PADDL X12, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, X5 + PSRLL $0x19, X2 + PXOR X2, X5 + MOVOA X0, X1 + PADDL X7, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X10 + PSRLL $0x17, X2 + PXOR X2, X10 + MOVOA X12, X1 + PADDL X5, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X8 + PSRLL $0x17, X2 + PXOR X2, X8 + MOVOA X7, X1 + PADDL X10, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X4 + PSRLL $0x13, X2 + PXOR X2, X4 + MOVOA X5, X1 + PADDL X8, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X14 + PSRLL $0x13, X2 + PXOR X2, X14 + MOVOA X10, X1 + PADDL X4, X1 + MOVOA X1, X2 + PSLLL $0x12, X1 + PXOR X1, X0 + PSRLL $0x0e, X2 + PXOR X2, X0 + MOVOA 320(R12), X1 + MOVOA X0, 320(R12) + MOVOA X8, X0 + PADDL X14, X0 + MOVOA X0, X2 + PSLLL $0x12, X0 + PXOR X0, X12 + PSRLL $0x0e, X2 + PXOR X2, X12 + MOVOA X11, X0 + PADDL X1, X0 + MOVOA X0, X2 + PSLLL $0x07, X0 + PXOR X0, X6 + PSRLL $0x19, X2 + PXOR X2, X6 + MOVOA 336(R12), X2 + MOVOA X12, 336(R12) + MOVOA X3, X0 + PADDL X2, X0 + MOVOA X0, X12 + PSLLL $0x07, X0 + PXOR X0, X13 + PSRLL $0x19, X12 + PXOR X12, X13 + MOVOA X1, X0 + PADDL X6, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X15 + PSRLL $0x17, X12 + PXOR X12, X15 + MOVOA X2, X0 + PADDL X13, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X9 + PSRLL $0x17, X12 + PXOR X12, X9 + MOVOA X6, X0 + PADDL X15, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X11 + PSRLL $0x13, X12 + PXOR X12, X11 + MOVOA X13, X0 + PADDL X9, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X3 + PSRLL $0x13, X12 + PXOR X12, X3 + MOVOA X15, X0 + PADDL X11, X0 + MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X1 + PSRLL $0x0e, X12 + PXOR X12, X1 + MOVOA X9, X0 + PADDL X3, X0 + MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X2 + PSRLL $0x0e, X12 + PXOR X12, X2 + MOVOA 320(R12), X12 + MOVOA 336(R12), X0 + SUBQ $0x02, DX + JA MAINLOOP1 + PADDL 112(R12), X12 + PADDL 176(R12), X7 + PADDL 224(R12), X10 + PADDL 272(R12), X4 + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL (SI), DX + XORL 4(SI), CX + XORL 8(SI), R8 + XORL 12(SI), R9 + MOVL DX, 
(DI) + MOVL CX, 4(DI) + MOVL R8, 8(DI) + MOVL R9, 12(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL 64(SI), DX + XORL 68(SI), CX + XORL 72(SI), R8 + XORL 76(SI), R9 + MOVL DX, 64(DI) + MOVL CX, 68(DI) + MOVL R8, 72(DI) + MOVL R9, 76(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL 128(SI), DX + XORL 132(SI), CX + XORL 136(SI), R8 + XORL 140(SI), R9 + MOVL DX, 128(DI) + MOVL CX, 132(DI) + MOVL R8, 136(DI) + MOVL R9, 140(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + XORL 192(SI), DX + XORL 196(SI), CX + XORL 200(SI), R8 + XORL 204(SI), R9 + MOVL DX, 192(DI) + MOVL CX, 196(DI) + MOVL R8, 200(DI) + MOVL R9, 204(DI) + PADDL 240(R12), X14 + PADDL 64(R12), X0 + PADDL 128(R12), X5 + PADDL 192(R12), X8 + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 16(SI), DX + XORL 20(SI), CX + XORL 24(SI), R8 + XORL 28(SI), R9 + MOVL DX, 16(DI) + MOVL CX, 20(DI) + MOVL R8, 24(DI) + MOVL R9, 28(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 80(SI), DX + XORL 84(SI), CX + XORL 88(SI), R8 + XORL 92(SI), R9 + MOVL DX, 80(DI) + MOVL CX, 84(DI) + MOVL R8, 88(DI) + MOVL R9, 92(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 144(SI), DX + XORL 148(SI), CX + XORL 152(SI), R8 + XORL 156(SI), R9 + MOVL DX, 144(DI) + MOVL CX, 148(DI) + MOVL R8, 152(DI) + MOVL R9, 156(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + XORL 208(SI), DX + XORL 212(SI), CX + XORL 216(SI), R8 + XORL 220(SI), R9 + MOVL DX, 208(DI) + MOVL CX, 212(DI) + MOVL R8, 216(DI) + MOVL R9, 220(DI) + PADDL 288(R12), X15 + PADDL 304(R12), X11 + PADDL 80(R12), X1 + PADDL 144(R12), X6 + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 32(SI), DX + XORL 36(SI), CX + XORL 40(SI), R8 + XORL 44(SI), R9 + MOVL DX, 32(DI) + MOVL CX, 36(DI) + MOVL R8, 40(DI) + MOVL R9, 44(DI) + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 96(SI), DX + XORL 100(SI), CX + XORL 104(SI), R8 + XORL 108(SI), R9 + MOVL DX, 96(DI) + MOVL CX, 100(DI) + MOVL R8, 104(DI) + MOVL R9, 108(DI) + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 160(SI), DX + XORL 164(SI), CX + XORL 168(SI), R8 + XORL 172(SI), R9 + MOVL DX, 160(DI) + MOVL CX, 164(DI) + MOVL R8, 168(DI) + MOVL R9, 172(DI) + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + XORL 224(SI), DX + XORL 228(SI), CX + XORL 232(SI), R8 + XORL 236(SI), R9 + MOVL DX, 224(DI) + MOVL CX, 228(DI) + MOVL R8, 232(DI) + MOVL R9, 236(DI) + PADDL 160(R12), X13 + PADDL 208(R12), X9 + PADDL 256(R12), X3 + PADDL 96(R12), X2 + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 48(SI), DX + XORL 52(SI), CX + XORL 56(SI), R8 + 
XORL 60(SI), R9 + MOVL DX, 48(DI) + MOVL CX, 52(DI) + MOVL R8, 56(DI) + MOVL R9, 60(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 112(SI), DX + XORL 116(SI), CX + XORL 120(SI), R8 + XORL 124(SI), R9 + MOVL DX, 112(DI) + MOVL CX, 116(DI) + MOVL R8, 120(DI) + MOVL R9, 124(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 176(SI), DX + XORL 180(SI), CX + XORL 184(SI), R8 + XORL 188(SI), R9 + MOVL DX, 176(DI) + MOVL CX, 180(DI) + MOVL R8, 184(DI) + MOVL R9, 188(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + XORL 240(SI), DX + XORL 244(SI), CX + XORL 248(SI), R8 + XORL 252(SI), R9 + MOVL DX, 240(DI) + MOVL CX, 244(DI) + MOVL R8, 248(DI) + MOVL R9, 252(DI) + MOVQ 352(R12), R9 + SUBQ $0x00000100, R9 + ADDQ $0x00000100, SI + ADDQ $0x00000100, DI + CMPQ R9, $0x00000100 + JAE BYTESATLEAST256 + CMPQ R9, $0x00 + JBE DONE - MOVQ DX,R9 - MOVQ CX,DX - MOVQ R8,R10 - CMPQ R9,$0 - JBE DONE - START: - MOVL 20(R10),CX - MOVL 0(R10),R8 - MOVL 0(DX),AX - MOVL 16(R10),R11 - MOVL CX,0(R12) - MOVL R8, 4 (R12) - MOVL AX, 8 (R12) - MOVL R11, 12 (R12) - MOVL 8(DX),CX - MOVL 24(R10),R8 - MOVL 4(R10),AX - MOVL 4(DX),R11 - MOVL CX,16(R12) - MOVL R8, 20 (R12) - MOVL AX, 24 (R12) - MOVL R11, 28 (R12) - MOVL 12(DX),CX - MOVL 12(R10),DX - MOVL 28(R10),R8 - MOVL 8(R10),AX - MOVL DX,32(R12) - MOVL CX, 36 (R12) - MOVL R8, 40 (R12) - MOVL AX, 44 (R12) - MOVQ $1634760805,DX - MOVQ $857760878,CX - MOVQ $2036477234,R8 - MOVQ $1797285236,AX - MOVL DX,48(R12) - MOVL CX, 52 (R12) - MOVL R8, 56 (R12) - MOVL AX, 60 (R12) - CMPQ R9,$256 - JB BYTESBETWEEN1AND255 - MOVOA 48(R12),X0 - PSHUFL $0X55,X0,X1 - PSHUFL $0XAA,X0,X2 - PSHUFL $0XFF,X0,X3 - PSHUFL $0X00,X0,X0 - MOVOA X1,64(R12) - MOVOA X2,80(R12) - MOVOA X3,96(R12) - MOVOA X0,112(R12) - MOVOA 0(R12),X0 - PSHUFL $0XAA,X0,X1 - PSHUFL $0XFF,X0,X2 - PSHUFL $0X00,X0,X3 - PSHUFL $0X55,X0,X0 - MOVOA X1,128(R12) - MOVOA X2,144(R12) - MOVOA X3,160(R12) - MOVOA X0,176(R12) - MOVOA 16(R12),X0 - PSHUFL $0XFF,X0,X1 - PSHUFL $0X55,X0,X2 - PSHUFL $0XAA,X0,X0 - MOVOA X1,192(R12) - MOVOA X2,208(R12) - MOVOA X0,224(R12) - MOVOA 32(R12),X0 - PSHUFL $0X00,X0,X1 - PSHUFL $0XAA,X0,X2 - PSHUFL $0XFF,X0,X0 - MOVOA X1,240(R12) - MOVOA X2,256(R12) - MOVOA X0,272(R12) - BYTESATLEAST256: - MOVL 16(R12),DX - MOVL 36 (R12),CX - MOVL DX,288(R12) - MOVL CX,304(R12) - SHLQ $32,CX - ADDQ CX,DX - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 292 (R12) - MOVL CX, 308 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 296 (R12) - MOVL CX, 312 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 300 (R12) - MOVL CX, 316 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX,16(R12) - MOVL CX, 36 (R12) - MOVQ R9,352(R12) - MOVQ $20,DX - MOVOA 64(R12),X0 - MOVOA 80(R12),X1 - MOVOA 96(R12),X2 - MOVOA 256(R12),X3 - MOVOA 272(R12),X4 - MOVOA 128(R12),X5 - MOVOA 144(R12),X6 - MOVOA 176(R12),X7 - MOVOA 192(R12),X8 - MOVOA 208(R12),X9 - MOVOA 224(R12),X10 - MOVOA 304(R12),X11 - MOVOA 112(R12),X12 - MOVOA 160(R12),X13 - MOVOA 240(R12),X14 - MOVOA 288(R12),X15 - MAINLOOP1: - MOVOA X1,320(R12) - MOVOA X2,336(R12) - MOVOA X13,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X14 - PSRLL $25,X2 - PXOR X2,X14 - MOVOA X7,X1 - PADDL X0,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X11 - PSRLL $25,X2 - PXOR X2,X11 - MOVOA X12,X1 - PADDL X14,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR 
X1,X15 - PSRLL $23,X2 - PXOR X2,X15 - MOVOA X0,X1 - PADDL X11,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X9 - PSRLL $23,X2 - PXOR X2,X9 - MOVOA X14,X1 - PADDL X15,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X13 - PSRLL $19,X2 - PXOR X2,X13 - MOVOA X11,X1 - PADDL X9,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X7 - PSRLL $19,X2 - PXOR X2,X7 - MOVOA X15,X1 - PADDL X13,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X12 - PSRLL $14,X2 - PXOR X2,X12 - MOVOA 320(R12),X1 - MOVOA X12,320(R12) - MOVOA X9,X2 - PADDL X7,X2 - MOVOA X2,X12 - PSLLL $18,X2 - PXOR X2,X0 - PSRLL $14,X12 - PXOR X12,X0 - MOVOA X5,X2 - PADDL X1,X2 - MOVOA X2,X12 - PSLLL $7,X2 - PXOR X2,X3 - PSRLL $25,X12 - PXOR X12,X3 - MOVOA 336(R12),X2 - MOVOA X0,336(R12) - MOVOA X6,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR X0,X4 - PSRLL $25,X12 - PXOR X12,X4 - MOVOA X1,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X10 - PSRLL $23,X12 - PXOR X12,X10 - MOVOA X2,X0 - PADDL X4,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X8 - PSRLL $23,X12 - PXOR X12,X8 - MOVOA X3,X0 - PADDL X10,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X5 - PSRLL $19,X12 - PXOR X12,X5 - MOVOA X4,X0 - PADDL X8,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X6 - PSRLL $19,X12 - PXOR X12,X6 - MOVOA X10,X0 - PADDL X5,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA 320(R12),X0 - MOVOA X1,320(R12) - MOVOA X4,X1 - PADDL X0,X1 - MOVOA X1,X12 - PSLLL $7,X1 - PXOR X1,X7 - PSRLL $25,X12 - PXOR X12,X7 - MOVOA X8,X1 - PADDL X6,X1 - MOVOA X1,X12 - PSLLL $18,X1 - PXOR X1,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 336(R12),X12 - MOVOA X2,336(R12) - MOVOA X14,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X5 - PSRLL $25,X2 - PXOR X2,X5 - MOVOA X0,X1 - PADDL X7,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X10 - PSRLL $23,X2 - PXOR X2,X10 - MOVOA X12,X1 - PADDL X5,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X8 - PSRLL $23,X2 - PXOR X2,X8 - MOVOA X7,X1 - PADDL X10,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X4 - PSRLL $19,X2 - PXOR X2,X4 - MOVOA X5,X1 - PADDL X8,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X14 - PSRLL $19,X2 - PXOR X2,X14 - MOVOA X10,X1 - PADDL X4,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X0 - PSRLL $14,X2 - PXOR X2,X0 - MOVOA 320(R12),X1 - MOVOA X0,320(R12) - MOVOA X8,X0 - PADDL X14,X0 - MOVOA X0,X2 - PSLLL $18,X0 - PXOR X0,X12 - PSRLL $14,X2 - PXOR X2,X12 - MOVOA X11,X0 - PADDL X1,X0 - MOVOA X0,X2 - PSLLL $7,X0 - PXOR X0,X6 - PSRLL $25,X2 - PXOR X2,X6 - MOVOA 336(R12),X2 - MOVOA X12,336(R12) - MOVOA X3,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR X0,X13 - PSRLL $25,X12 - PXOR X12,X13 - MOVOA X1,X0 - PADDL X6,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X15 - PSRLL $23,X12 - PXOR X12,X15 - MOVOA X2,X0 - PADDL X13,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X9 - PSRLL $23,X12 - PXOR X12,X9 - MOVOA X6,X0 - PADDL X15,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X11 - PSRLL $19,X12 - PXOR X12,X11 - MOVOA X13,X0 - PADDL X9,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X3 - PSRLL $19,X12 - PXOR X12,X3 - MOVOA X15,X0 - PADDL X11,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA X9,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 320(R12),X12 - MOVOA 336(R12),X0 - SUBQ $2,DX - JA MAINLOOP1 - PADDL 112(R12),X12 - PADDL 176(R12),X7 - PADDL 224(R12),X10 - PADDL 272(R12),X4 - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 0(SI),DX - XORL 4(SI),CX - XORL 8(SI),R8 - XORL 12(SI),R9 
- MOVL DX,0(DI) - MOVL CX,4(DI) - MOVL R8,8(DI) - MOVL R9,12(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 64(SI),DX - XORL 68(SI),CX - XORL 72(SI),R8 - XORL 76(SI),R9 - MOVL DX,64(DI) - MOVL CX,68(DI) - MOVL R8,72(DI) - MOVL R9,76(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 128(SI),DX - XORL 132(SI),CX - XORL 136(SI),R8 - XORL 140(SI),R9 - MOVL DX,128(DI) - MOVL CX,132(DI) - MOVL R8,136(DI) - MOVL R9,140(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - XORL 192(SI),DX - XORL 196(SI),CX - XORL 200(SI),R8 - XORL 204(SI),R9 - MOVL DX,192(DI) - MOVL CX,196(DI) - MOVL R8,200(DI) - MOVL R9,204(DI) - PADDL 240(R12),X14 - PADDL 64(R12),X0 - PADDL 128(R12),X5 - PADDL 192(R12),X8 - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 16(SI),DX - XORL 20(SI),CX - XORL 24(SI),R8 - XORL 28(SI),R9 - MOVL DX,16(DI) - MOVL CX,20(DI) - MOVL R8,24(DI) - MOVL R9,28(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 80(SI),DX - XORL 84(SI),CX - XORL 88(SI),R8 - XORL 92(SI),R9 - MOVL DX,80(DI) - MOVL CX,84(DI) - MOVL R8,88(DI) - MOVL R9,92(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 144(SI),DX - XORL 148(SI),CX - XORL 152(SI),R8 - XORL 156(SI),R9 - MOVL DX,144(DI) - MOVL CX,148(DI) - MOVL R8,152(DI) - MOVL R9,156(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - XORL 208(SI),DX - XORL 212(SI),CX - XORL 216(SI),R8 - XORL 220(SI),R9 - MOVL DX,208(DI) - MOVL CX,212(DI) - MOVL R8,216(DI) - MOVL R9,220(DI) - PADDL 288(R12),X15 - PADDL 304(R12),X11 - PADDL 80(R12),X1 - PADDL 144(R12),X6 - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 32(SI),DX - XORL 36(SI),CX - XORL 40(SI),R8 - XORL 44(SI),R9 - MOVL DX,32(DI) - MOVL CX,36(DI) - MOVL R8,40(DI) - MOVL R9,44(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 96(SI),DX - XORL 100(SI),CX - XORL 104(SI),R8 - XORL 108(SI),R9 - MOVL DX,96(DI) - MOVL CX,100(DI) - MOVL R8,104(DI) - MOVL R9,108(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 160(SI),DX - XORL 164(SI),CX - XORL 168(SI),R8 - XORL 172(SI),R9 - MOVL DX,160(DI) - MOVL CX,164(DI) - MOVL R8,168(DI) - MOVL R9,172(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - XORL 224(SI),DX - XORL 228(SI),CX - XORL 232(SI),R8 - XORL 236(SI),R9 - MOVL DX,224(DI) - MOVL CX,228(DI) - MOVL R8,232(DI) - MOVL R9,236(DI) - PADDL 160(R12),X13 - PADDL 208(R12),X9 - PADDL 256(R12),X3 - PADDL 96(R12),X2 - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 48(SI),DX - XORL 52(SI),CX - XORL 56(SI),R8 - XORL 60(SI),R9 - MOVL DX,48(DI) - MOVL CX,52(DI) - MOVL R8,56(DI) - MOVL R9,60(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL 
$0X39,X2,X2 - XORL 112(SI),DX - XORL 116(SI),CX - XORL 120(SI),R8 - XORL 124(SI),R9 - MOVL DX,112(DI) - MOVL CX,116(DI) - MOVL R8,120(DI) - MOVL R9,124(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 176(SI),DX - XORL 180(SI),CX - XORL 184(SI),R8 - XORL 188(SI),R9 - MOVL DX,176(DI) - MOVL CX,180(DI) - MOVL R8,184(DI) - MOVL R9,188(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - XORL 240(SI),DX - XORL 244(SI),CX - XORL 248(SI),R8 - XORL 252(SI),R9 - MOVL DX,240(DI) - MOVL CX,244(DI) - MOVL R8,248(DI) - MOVL R9,252(DI) - MOVQ 352(R12),R9 - SUBQ $256,R9 - ADDQ $256,SI - ADDQ $256,DI - CMPQ R9,$256 - JAE BYTESATLEAST256 - CMPQ R9,$0 - JBE DONE - BYTESBETWEEN1AND255: - CMPQ R9,$64 - JAE NOCOPY - MOVQ DI,DX - LEAQ 360(R12),DI - MOVQ R9,CX +BYTESBETWEEN1AND255: + CMPQ R9, $0x40 + JAE NOCOPY + MOVQ DI, DX + LEAQ 360(R12), DI + MOVQ R9, CX REP; MOVSB - LEAQ 360(R12),DI - LEAQ 360(R12),SI - NOCOPY: - MOVQ R9,352(R12) - MOVOA 48(R12),X0 - MOVOA 0(R12),X1 - MOVOA 16(R12),X2 - MOVOA 32(R12),X3 - MOVOA X1,X4 - MOVQ $20,CX - MAINLOOP2: - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - SUBQ $4,CX - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PXOR X7,X7 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - JA MAINLOOP2 - PADDL 48(R12),X0 - PADDL 0(R12),X1 - PADDL 16(R12),X2 - PADDL 32(R12),X3 - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 0(SI),CX - XORL 48(SI),R8 - XORL 32(SI),R9 - XORL 16(SI),AX - MOVL CX,0(DI) - MOVL R8,48(DI) - MOVL R9,32(DI) - MOVL AX,16(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 20(SI),CX - XORL 4(SI),R8 - XORL 52(SI),R9 - XORL 36(SI),AX - 
MOVL CX,20(DI) - MOVL R8,4(DI) - MOVL R9,52(DI) - MOVL AX,36(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 40(SI),CX - XORL 24(SI),R8 - XORL 8(SI),R9 - XORL 56(SI),AX - MOVL CX,40(DI) - MOVL R8,24(DI) - MOVL R9,8(DI) - MOVL AX,56(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - XORL 60(SI),CX - XORL 44(SI),R8 - XORL 28(SI),R9 - XORL 12(SI),AX - MOVL CX,60(DI) - MOVL R8,44(DI) - MOVL R9,28(DI) - MOVL AX,12(DI) - MOVQ 352(R12),R9 - MOVL 16(R12),CX - MOVL 36 (R12),R8 - ADDQ $1,CX - SHLQ $32,R8 - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $32,R8 - MOVL CX,16(R12) - MOVL R8, 36 (R12) - CMPQ R9,$64 - JA BYTESATLEAST65 - JAE BYTESATLEAST64 - MOVQ DI,SI - MOVQ DX,DI - MOVQ R9,CX + LEAQ 360(R12), DI + LEAQ 360(R12), SI + +NOCOPY: + MOVQ R9, 352(R12) + MOVOA 48(R12), X0 + MOVOA (R12), X1 + MOVOA 16(R12), X2 + MOVOA 32(R12), X3 + MOVOA X1, X4 + MOVQ $0x00000014, CX + +MAINLOOP2: + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X3 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X3, X3 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X1 + PSHUFL $0x4e, X2, X2 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X1, X1 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X1 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X1, X1 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X3 + PSHUFL $0x4e, X2, X2 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X3, X3 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X3 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X3, X3 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X1 + PSHUFL $0x4e, X2, X2 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X1, X1 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X1 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X1, X1 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X3 + PSHUFL $0x4e, X2, X2 + PXOR X6, X3 + SUBQ $0x04, CX + PADDL X3, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PXOR X7, X7 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X3, X3 + PXOR X6, X0 + JA MAINLOOP2 + PADDL 48(R12), X0 + PADDL (R12), X1 + PADDL 16(R12), X2 + PADDL 32(R12), X3 + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, X3, X3 + XORL (SI), CX + XORL 48(SI), R8 + XORL 32(SI), R9 + XORL 16(SI), AX + MOVL CX, (DI) + MOVL R8, 48(DI) + MOVL R9, 32(DI) + MOVL AX, 16(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, 
X3, X3 + XORL 20(SI), CX + XORL 4(SI), R8 + XORL 52(SI), R9 + XORL 36(SI), AX + MOVL CX, 20(DI) + MOVL R8, 4(DI) + MOVL R9, 52(DI) + MOVL AX, 36(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, X3, X3 + XORL 40(SI), CX + XORL 24(SI), R8 + XORL 8(SI), R9 + XORL 56(SI), AX + MOVL CX, 40(DI) + MOVL R8, 24(DI) + MOVL R9, 8(DI) + MOVL AX, 56(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + XORL 60(SI), CX + XORL 44(SI), R8 + XORL 28(SI), R9 + XORL 12(SI), AX + MOVL CX, 60(DI) + MOVL R8, 44(DI) + MOVL R9, 28(DI) + MOVL AX, 12(DI) + MOVQ 352(R12), R9 + MOVL 16(R12), CX + MOVL 36(R12), R8 + ADDQ $0x01, CX + SHLQ $0x20, R8 + ADDQ R8, CX + MOVQ CX, R8 + SHRQ $0x20, R8 + MOVL CX, 16(R12) + MOVL R8, 36(R12) + CMPQ R9, $0x40 + JA BYTESATLEAST65 + JAE BYTESATLEAST64 + MOVQ DI, SI + MOVQ DX, DI + MOVQ R9, CX REP; MOVSB - BYTESATLEAST64: - DONE: + +BYTESATLEAST64: +DONE: RET - BYTESATLEAST65: - SUBQ $64,R9 - ADDQ $64,DI - ADDQ $64,SI - JMP BYTESBETWEEN1AND255 + +BYTESATLEAST65: + SUBQ $0x40, R9 + ADDQ $0x40, DI + ADDQ $0x40, SI + JMP BYTESBETWEEN1AND255 diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go index decd8cf9bf..7e02309070 100644 --- a/vendor/golang.org/x/crypto/sha3/doc.go +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -59,4 +59,4 @@ // They produce output of the same length, with the same security strengths // against all attacks. This means, in particular, that SHA3-256 only has // 128-bit collision resistance, because its output length is 32 bytes. -package sha3 // import "golang.org/x/crypto/sha3" +package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go index 5eae6cb922..c544b29e5f 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes.go +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -9,6 +9,7 @@ package sha3 // bytes. import ( + "crypto" "hash" ) @@ -40,6 +41,13 @@ func New512() hash.Hash { return new512() } +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} + func new224Generic() *state { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} } diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s index 1f53938861..99e2f16e97 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -1,390 +1,5419 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run keccakf_amd64_asm.go -out ../keccakf_amd64.s -pkg sha3. DO NOT EDIT. 
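Editor's sketch for the sha3/hashes.go hunk above, which adds an init function calling crypto.RegisterHash for the SHA-3 variants: a small illustration of what that registration enables. It is not part of this diff.

	package main

	import (
		"crypto"
		"fmt"

		_ "golang.org/x/crypto/sha3" // the new init() registers the SHA3-* constructors
	)

	func main() {
		// With the registration in place, the standard-library crypto.Hash
		// constants for SHA-3 resolve to this package.
		fmt.Println(crypto.SHA3_256.Available()) // true once sha3 is imported
		h := crypto.SHA3_256.New()
		h.Write([]byte("abc"))
		fmt.Printf("%x\n", h.Sum(nil))
	}

A blank import of the package is enough; nothing else in the sha3 API needs to be referenced for crypto.SHA3_256.New to work.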
//go:build amd64 && !purego && gc -// This code was translated into a form compatible with 6a from the public -// domain sources at https://github.com/gvanas/KeccakCodePackage - -// Offsets in state -#define _ba (0*8) -#define _be (1*8) -#define _bi (2*8) -#define _bo (3*8) -#define _bu (4*8) -#define _ga (5*8) -#define _ge (6*8) -#define _gi (7*8) -#define _go (8*8) -#define _gu (9*8) -#define _ka (10*8) -#define _ke (11*8) -#define _ki (12*8) -#define _ko (13*8) -#define _ku (14*8) -#define _ma (15*8) -#define _me (16*8) -#define _mi (17*8) -#define _mo (18*8) -#define _mu (19*8) -#define _sa (20*8) -#define _se (21*8) -#define _si (22*8) -#define _so (23*8) -#define _su (24*8) - -// Temporary registers -#define rT1 AX - -// Round vars -#define rpState DI -#define rpStack SP - -#define rDa BX -#define rDe CX -#define rDi DX -#define rDo R8 -#define rDu R9 - -#define rBa R10 -#define rBe R11 -#define rBi R12 -#define rBo R13 -#define rBu R14 - -#define rCa SI -#define rCe BP -#define rCi rBi -#define rCo rBo -#define rCu R15 - -#define MOVQ_RBI_RCE MOVQ rBi, rCe -#define XORQ_RT1_RCA XORQ rT1, rCa -#define XORQ_RT1_RCE XORQ rT1, rCe -#define XORQ_RBA_RCU XORQ rBa, rCu -#define XORQ_RBE_RCU XORQ rBe, rCu -#define XORQ_RDU_RCU XORQ rDu, rCu -#define XORQ_RDA_RCA XORQ rDa, rCa -#define XORQ_RDE_RCE XORQ rDe, rCe - -#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \ - /* Prepare round */ \ - MOVQ rCe, rDa; \ - ROLQ $1, rDa; \ - \ - MOVQ _bi(iState), rCi; \ - XORQ _gi(iState), rDi; \ - XORQ rCu, rDa; \ - XORQ _ki(iState), rCi; \ - XORQ _mi(iState), rDi; \ - XORQ rDi, rCi; \ - \ - MOVQ rCi, rDe; \ - ROLQ $1, rDe; \ - \ - MOVQ _bo(iState), rCo; \ - XORQ _go(iState), rDo; \ - XORQ rCa, rDe; \ - XORQ _ko(iState), rCo; \ - XORQ _mo(iState), rDo; \ - XORQ rDo, rCo; \ - \ - MOVQ rCo, rDi; \ - ROLQ $1, rDi; \ - \ - MOVQ rCu, rDo; \ - XORQ rCe, rDi; \ - ROLQ $1, rDo; \ - \ - MOVQ rCa, rDu; \ - XORQ rCi, rDo; \ - ROLQ $1, rDu; \ - \ - /* Result b */ \ - MOVQ _ba(iState), rBa; \ - MOVQ _ge(iState), rBe; \ - XORQ rCo, rDu; \ - MOVQ _ki(iState), rBi; \ - MOVQ _mo(iState), rBo; \ - MOVQ _su(iState), rBu; \ - XORQ rDe, rBe; \ - ROLQ $44, rBe; \ - XORQ rDi, rBi; \ - XORQ rDa, rBa; \ - ROLQ $43, rBi; \ - \ - MOVQ rBe, rCa; \ - MOVQ rc, rT1; \ - ORQ rBi, rCa; \ - XORQ rBa, rT1; \ - XORQ rT1, rCa; \ - MOVQ rCa, _ba(oState); \ - \ - XORQ rDu, rBu; \ - ROLQ $14, rBu; \ - MOVQ rBa, rCu; \ - ANDQ rBe, rCu; \ - XORQ rBu, rCu; \ - MOVQ rCu, _bu(oState); \ - \ - XORQ rDo, rBo; \ - ROLQ $21, rBo; \ - MOVQ rBo, rT1; \ - ANDQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _bi(oState); \ - \ - NOTQ rBi; \ - ORQ rBa, rBu; \ - ORQ rBo, rBi; \ - XORQ rBo, rBu; \ - XORQ rBe, rBi; \ - MOVQ rBu, _bo(oState); \ - MOVQ rBi, _be(oState); \ - B_RBI_RCE; \ - \ - /* Result g */ \ - MOVQ _gu(iState), rBe; \ - XORQ rDu, rBe; \ - MOVQ _ka(iState), rBi; \ - ROLQ $20, rBe; \ - XORQ rDa, rBi; \ - ROLQ $3, rBi; \ - MOVQ _bo(iState), rBa; \ - MOVQ rBe, rT1; \ - ORQ rBi, rT1; \ - XORQ rDo, rBa; \ - MOVQ _me(iState), rBo; \ - MOVQ _si(iState), rBu; \ - ROLQ $28, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ga(oState); \ - G_RT1_RCA; \ - \ - XORQ rDe, rBo; \ - ROLQ $45, rBo; \ - MOVQ rBi, rT1; \ - ANDQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _ge(oState); \ - G_RT1_RCE; \ - \ - XORQ rDi, rBu; \ - ROLQ $61, rBu; \ - MOVQ rBu, rT1; \ - ORQ rBa, rT1; \ - XORQ rBo, rT1; \ - MOVQ rT1, _go(oState); \ - \ - ANDQ rBe, rBa; \ - 
XORQ rBu, rBa; \ - MOVQ rBa, _gu(oState); \ - NOTQ rBu; \ - G_RBA_RCU; \ - \ - ORQ rBu, rBo; \ - XORQ rBi, rBo; \ - MOVQ rBo, _gi(oState); \ - \ - /* Result k */ \ - MOVQ _be(iState), rBa; \ - MOVQ _gi(iState), rBe; \ - MOVQ _ko(iState), rBi; \ - MOVQ _mu(iState), rBo; \ - MOVQ _sa(iState), rBu; \ - XORQ rDi, rBe; \ - ROLQ $6, rBe; \ - XORQ rDo, rBi; \ - ROLQ $25, rBi; \ - MOVQ rBe, rT1; \ - ORQ rBi, rT1; \ - XORQ rDe, rBa; \ - ROLQ $1, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ka(oState); \ - K_RT1_RCA; \ - \ - XORQ rDu, rBo; \ - ROLQ $8, rBo; \ - MOVQ rBi, rT1; \ - ANDQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _ke(oState); \ - K_RT1_RCE; \ - \ - XORQ rDa, rBu; \ - ROLQ $18, rBu; \ - NOTQ rBo; \ - MOVQ rBo, rT1; \ - ANDQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _ki(oState); \ - \ - MOVQ rBu, rT1; \ - ORQ rBa, rT1; \ - XORQ rBo, rT1; \ - MOVQ rT1, _ko(oState); \ - \ - ANDQ rBe, rBa; \ - XORQ rBu, rBa; \ - MOVQ rBa, _ku(oState); \ - K_RBA_RCU; \ - \ - /* Result m */ \ - MOVQ _ga(iState), rBe; \ - XORQ rDa, rBe; \ - MOVQ _ke(iState), rBi; \ - ROLQ $36, rBe; \ - XORQ rDe, rBi; \ - MOVQ _bu(iState), rBa; \ - ROLQ $10, rBi; \ - MOVQ rBe, rT1; \ - MOVQ _mi(iState), rBo; \ - ANDQ rBi, rT1; \ - XORQ rDu, rBa; \ - MOVQ _so(iState), rBu; \ - ROLQ $27, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ma(oState); \ - M_RT1_RCA; \ - \ - XORQ rDi, rBo; \ - ROLQ $15, rBo; \ - MOVQ rBi, rT1; \ - ORQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _me(oState); \ - M_RT1_RCE; \ - \ - XORQ rDo, rBu; \ - ROLQ $56, rBu; \ - NOTQ rBo; \ - MOVQ rBo, rT1; \ - ORQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _mi(oState); \ - \ - ORQ rBa, rBe; \ - XORQ rBu, rBe; \ - MOVQ rBe, _mu(oState); \ - \ - ANDQ rBa, rBu; \ - XORQ rBo, rBu; \ - MOVQ rBu, _mo(oState); \ - M_RBE_RCU; \ - \ - /* Result s */ \ - MOVQ _bi(iState), rBa; \ - MOVQ _go(iState), rBe; \ - MOVQ _ku(iState), rBi; \ - XORQ rDi, rBa; \ - MOVQ _ma(iState), rBo; \ - ROLQ $62, rBa; \ - XORQ rDo, rBe; \ - MOVQ _se(iState), rBu; \ - ROLQ $55, rBe; \ - \ - XORQ rDu, rBi; \ - MOVQ rBa, rDu; \ - XORQ rDe, rBu; \ - ROLQ $2, rBu; \ - ANDQ rBe, rDu; \ - XORQ rBu, rDu; \ - MOVQ rDu, _su(oState); \ - \ - ROLQ $39, rBi; \ - S_RDU_RCU; \ - NOTQ rBe; \ - XORQ rDa, rBo; \ - MOVQ rBe, rDa; \ - ANDQ rBi, rDa; \ - XORQ rBa, rDa; \ - MOVQ rDa, _sa(oState); \ - S_RDA_RCA; \ - \ - ROLQ $41, rBo; \ - MOVQ rBi, rDe; \ - ORQ rBo, rDe; \ - XORQ rBe, rDe; \ - MOVQ rDe, _se(oState); \ - S_RDE_RCE; \ - \ - MOVQ rBo, rDi; \ - MOVQ rBu, rDo; \ - ANDQ rBu, rDi; \ - ORQ rBa, rDo; \ - XORQ rBi, rDi; \ - XORQ rBo, rDo; \ - MOVQ rDi, _si(oState); \ - MOVQ rDo, _so(oState) \ - // func keccakF1600(a *[25]uint64) -TEXT ·keccakF1600(SB), 0, $200-8 - MOVQ a+0(FP), rpState +TEXT ·keccakF1600(SB), $200-8 + MOVQ a+0(FP), DI // Convert the user state into an internal state - NOTQ _be(rpState) - NOTQ _bi(rpState) - NOTQ _go(rpState) - NOTQ _ki(rpState) - NOTQ _mi(rpState) - NOTQ _sa(rpState) + NOTQ 8(DI) + NOTQ 16(DI) + NOTQ 64(DI) + NOTQ 96(DI) + NOTQ 136(DI) + NOTQ 160(DI) // Execute the KeccakF permutation - MOVQ _ba(rpState), rCa - MOVQ _be(rpState), rCe - MOVQ _bu(rpState), rCu - - XORQ _ga(rpState), rCa - XORQ _ge(rpState), rCe - XORQ _gu(rpState), rCu - - XORQ _ka(rpState), rCa - XORQ _ke(rpState), rCe - XORQ _ku(rpState), rCu - - XORQ _ma(rpState), rCa - XORQ _me(rpState), rCe - XORQ _mu(rpState), rCu - - XORQ _sa(rpState), rCa - XORQ _se(rpState), rCe - MOVQ _si(rpState), rDi - MOVQ _so(rpState), rDo - XORQ _su(rpState), rCu - - mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, 
XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, 
XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP) + MOVQ (DI), SI + MOVQ 8(DI), BP + MOVQ 32(DI), R15 + XORQ 40(DI), SI + XORQ 48(DI), BP + XORQ 72(DI), R15 + XORQ 80(DI), SI + XORQ 88(DI), BP + XORQ 112(DI), R15 + XORQ 120(DI), SI + XORQ 128(DI), BP + XORQ 152(DI), R15 + XORQ 160(DI), SI + XORQ 168(DI), BP + MOVQ 176(DI), DX + MOVQ 184(DI), R8 + XORQ 192(DI), R15 - // Revert the internal state to the user state - NOTQ _be(rpState) - NOTQ _bi(rpState) - NOTQ _go(rpState) - NOTQ _ki(rpState) - NOTQ _mi(rpState) - NOTQ _sa(rpState) + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ 
$0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000008082, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ 
$0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000000000808a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, 
R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008000, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g 
+ MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000808b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) 
+ MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ 
R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008081, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ 
R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008009, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ 
R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000008a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + 
MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000000088, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + 
ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080008009, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + 
ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000008000000a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + 
MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000008000808b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + 
ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000000000008b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + 
ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008089, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + 
MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008003, AX + ORQ R12, SI 
+ XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ 
$0x8000000000008002, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ 
$0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000000080, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ 
DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000800a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ 
CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000008000000a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), 
R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008081, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + 
MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008080, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + 
MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + 
// Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008008, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + NOP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + NOP + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + NOP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + NOP + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + NOP + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + NOP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + NOP + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + NOP + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + NOP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + NOP + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + NOP + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + NOP + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + NOP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Revert the internal state to the user state + NOTQ 8(DI) + NOTQ 16(DI) + NOTQ 64(DI) + NOTQ 96(DI) + NOTQ 136(DI) + NOTQ 160(DI) RET diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go deleted file mode 100644 index addfd5049b..0000000000 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
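Note on the keccakf_amd64.s hunk above: the permutation is fully unrolled, and each "Result b" block folds one round constant (the MOVQ $0x... immediate) into the new A[0,0] lane, which is the iota step; the ten immediates visible in this hunk are the final Keccak-f[1600] round constants, RC[14] through RC[23]. The closing NOTQ instructions at state offsets 8, 16, 64, 96, 136 and 160 appear to undo the lane-complementing optimization (lanes 1, 2, 8, 12, 17 and 20 are kept bit-inverted between rounds) before RET hands the state back to the caller. For reference only, not part of the vendored file, the constant table in Go:

    package sketch

    // keccakRC lists the 24 round constants of Keccak-f[1600]; the unrolled
    // assembly above embeds RC[14:] as immediates, one per remaining round.
    var keccakRC = [24]uint64{
        0x0000000000000001, 0x0000000000008082, 0x800000000000808A, 0x8000000080008000,
        0x000000000000808B, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009,
        0x000000000000008A, 0x0000000000000088, 0x0000000080008009, 0x000000008000000A,
        0x000000008000808B, 0x800000000000008B, 0x8000000000008089, 0x8000000000008003,
        0x8000000000008002, 0x8000000000000080, 0x000000000000800A, 0x800000008000000A,
        0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008,
    }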
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.4 - -package sha3 - -import ( - "crypto" -) - -func init() { - crypto.RegisterHash(crypto.SHA3_224, New224) - crypto.RegisterHash(crypto.SHA3_256, New256) - crypto.RegisterHash(crypto.SHA3_384, New384) - crypto.RegisterHash(crypto.SHA3_512, New512) -} diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index 1ea9275b8b..a01ef43577 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -85,9 +85,9 @@ func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { // leftEncode returns max 9 bytes c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...) c.initBlock = append(c.initBlock, N...) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...) c.initBlock = append(c.initBlock, S...) c.Write(bytepad(c.initBlock, c.rate)) return &c diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index b67897f76b..f58bbc7ba4 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -22,10 +22,12 @@ func Sort[S ~[]E, E constraints.Ordered](x S) { // SortFunc sorts the slice x in ascending order as determined by the cmp // function. This sort is not guaranteed to be stable. // cmp(a, b) should return a negative number when a < b, a positive number when -// a > b and zero when a == b. +// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. // // SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +// To indicate 'uncomparable', return 0 from the function. func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/mod/LICENSE +++ b/vendor/golang.org/x/mod/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/net/LICENSE +++ b/vendor/golang.org/x/net/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go new file mode 100644 index 0000000000..de58dfb8dc --- /dev/null +++ b/vendor/golang.org/x/net/http2/config.go @@ -0,0 +1,122 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "math" + "net/http" + "time" +) + +// http2Config is a package-internal version of net/http.HTTP2Config. +// +// http.HTTP2Config was added in Go 1.24. +// When running with a version of net/http that includes HTTP2Config, +// we merge the configuration with the fields in Transport or Server +// to produce an http2Config. +// +// Zero valued fields in http2Config are interpreted as in the +// net/http.HTTPConfig documentation. +// +// Precedence order for reconciling configurations is: +// +// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero. +// - Otherwise use the http2.{Server.Transport} value. +// - If the resulting value is zero or out of range, use a default. +type http2Config struct { + MaxConcurrentStreams uint32 + MaxDecoderHeaderTableSize uint32 + MaxEncoderHeaderTableSize uint32 + MaxReadFrameSize uint32 + MaxUploadBufferPerConnection int32 + MaxUploadBufferPerStream int32 + SendPingTimeout time.Duration + PingTimeout time.Duration + WriteByteTimeout time.Duration + PermitProhibitedCipherSuites bool + CountError func(errType string) +} + +// configFromServer merges configuration settings from +// net/http.Server.HTTP2Config and http2.Server. 
+func configFromServer(h1 *http.Server, h2 *Server) http2Config { + conf := http2Config{ + MaxConcurrentStreams: h2.MaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection, + MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, + CountError: h2.CountError, + } + fillNetHTTPServerConfig(&conf, h1) + setConfigDefaults(&conf, true) + return conf +} + +// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// (the net/http Transport). +func configFromTransport(h2 *Transport) http2Config { + conf := http2Config{ + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + } + + // Unlike most config fields, where out-of-range values revert to the default, + // Transport.MaxReadFrameSize clips. + if conf.MaxReadFrameSize < minMaxFrameSize { + conf.MaxReadFrameSize = minMaxFrameSize + } else if conf.MaxReadFrameSize > maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. +func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go new file mode 100644 index 0000000000..e3784123c8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
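The config.go file added above centralizes the limit handling that used to live in per-field getter methods: explicit Server/Transport values win, anything zero or out of range falls back to a default through the generic setDefault helper, and Transport.MaxReadFrameSize is the one field that clips to the valid range instead of reverting. A rough illustration of those two behaviors, using hypothetical values and assuming the usual HTTP/2 frame-size bounds of 16384 and 16777215:

    package main

    import "fmt"

    // setDefault mirrors the helper in config.go: out-of-range values revert
    // to the supplied default.
    func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
        if *v < minval || *v > maxval {
            *v = defval
        }
    }

    // clip shows the MaxReadFrameSize behavior: values are clamped to the
    // valid range rather than replaced by a default.
    func clip[T ~uint32](v, minval, maxval T) T {
        if v < minval {
            return minval
        }
        if v > maxval {
            return maxval
        }
        return v
    }

    func main() {
        streams := uint32(0) // unset
        setDefault(&streams, 1, ^uint32(0), 250)
        fmt.Println(streams)                              // 250, i.e. defaultMaxStreams
        fmt.Println(clip(uint32(1<<10), 16384, 16777215)) // 16384, clipped up to the minimum frame size
    }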
+ +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go new file mode 100644 index 0000000000..060fd6c64c --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. + +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 003e649f30..7688c356b7 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -19,8 +19,9 @@ import ( "bufio" "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" @@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. 
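config_go124.go and config_pre_go124.go above use the usual build-tag pattern for consuming a net/http API that only exists from Go 1.24 on (the Server.HTTP2 and Transport.HTTP2 fields): the go1.24 file reads the new config struct, while the !go1.24 file supplies no-op stubs with identical signatures so the package still builds on older toolchains. A minimal sketch of the same pattern with made-up names, assuming only the fields referenced in the hunk above:

    // feature_go124.go
    //go:build go1.24

    package feature

    import "net/http"

    // maxStreamsFromServer reads the Go 1.24 HTTP2 config when it is present.
    func maxStreamsFromServer(srv *http.Server) int {
        if srv.HTTP2 != nil {
            return srv.HTTP2.MaxConcurrentStreams
        }
        return 0
    }

    // feature_pre_go124.go
    //go:build !go1.24

    package feature

    import "net/http"

    // Older toolchains: the field does not exist, so report "unset".
    func maxStreamsFromServer(srv *http.Server) int { return 0 }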
type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6c349f3ec6..617b4a4762 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -52,10 +53,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +132,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. 
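writeWithByteTimeout above implements WriteByteTimeout as a rolling deadline: each attempt re-arms the deadline, so a peer that keeps accepting at least some bytes is tolerated indefinitely, while a write that makes no progress for a full timeout window fails. A simplified standalone version (the synctestGroupInterface plumbing from the hunk is dropped; this is an illustration, not the vendored code):

    package sketch

    import (
        "errors"
        "net"
        "os"
        "time"
    )

    func writeAllWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (int, error) {
        if timeout <= 0 {
            return conn.Write(p) // no timeout configured
        }
        n := 0
        for {
            // Fresh deadline per attempt; any forward progress earns another window.
            conn.SetWriteDeadline(time.Now().Add(timeout))
            nn, err := conn.Write(p[n:])
            n += nn
            if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
                conn.SetWriteDeadline(time.Time{})
                return n, err
            }
        }
    }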
+ ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. 
- return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +584,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +604,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +615,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -937,18 +915,18 @@ func (sc *serverConn) serve() { sc.writeFrame(FrameWriteRequest{ write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxFrameSize, conf.MaxReadFrameSize}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, }, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +946,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +971,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
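handlePingTimer above is the server half of the new keepalive: when the read-idle timer fires it either re-arms itself (because a frame arrived since it was armed), or sends a PING with a random payload and gives the peer pingTimeout to echo it back, and it closes the connection if a previous PING is still unanswered. A self-contained sketch of that decision, using hypothetical names (onReadIdle, pingAction) rather than the x/net internals, to make the three outcomes explicit:

```go
package main

import (
	"fmt"
	"time"
)

// pingAction says what the serve loop should do when the read-idle timer fires.
type pingAction int

const (
	closeConn  pingAction = iota // a PING was already outstanding and never answered
	rearmTimer                   // frames arrived since the timer was armed; wait again
	sendPing                     // connection is quiet; probe the peer with a PING
)

// onReadIdle mirrors the branch structure of handlePingTimer above: lastFrame is
// when the last frame was read, readIdle is the configured SendPingTimeout, and
// pingSent reports whether a PING is already outstanding.
func onReadIdle(now, lastFrame time.Time, readIdle time.Duration, pingSent bool) (pingAction, time.Duration) {
	if pingSent {
		return closeConn, 0
	}
	if wakeAt := lastFrame.Add(readIdle); wakeAt.After(now) {
		return rearmTimer, wakeAt.Sub(now) // sleep only for the remaining idle window
	}
	return sendPing, 0 // caller writes the PING and re-arms with pingTimeout
}

func main() {
	now := time.Now()
	act, d := onReadIdle(now, now.Add(-5*time.Second), 30*time.Second, false)
	fmt.Println(act == rearmTimer, d) // true, ~25s: traffic was seen recently
}
```

The matching ACK handling is in processPing below; on the client side the same two durations have long been exposed as Transport.ReadIdleTimeout and Transport.PingTimeout, which the transport.go hunks later in this change route through the same shared config.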
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1083,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 98a49c6b6e..0c5f64aa8b 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. 
// @@ -370,11 +355,14 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - } + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,30 +775,25 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -838,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -852,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -871,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -2203,7 +2164,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2349,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398a1..6ff6bee7e9 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index 923a5780ec..ac76165ceb 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -8,7 +8,7 @@ // This package currently lacks some features found in an alternative // and more actively maintained WebSocket package: // -// https://pkg.go.dev/nhooyr.io/websocket +// https://pkg.go.dev/github.com/coder/websocket package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb332174..109997d77c 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". 
+ // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/sync/LICENSE +++ b/vendor/golang.org/x/sync/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/sys/LICENSE +++ b/vendor/golang.org/x/sys/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 8fa707aa4b..02609d5b21 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -105,6 +105,8 @@ var ARM64 struct { HasSVE bool // Scalable Vector Extensions HasSVE2 bool // Scalable Vector Extensions 2 HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + HasDIT bool // Data Independent Timing support + HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions _ CacheLinePad } @@ -199,6 +201,25 @@ var S390X struct { _ CacheLinePad } +// RISCV64 contains the supported CPU features and performance characteristics for riscv64 +// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate +// the presence of RISC-V extensions. +// +// It is safe to assume that all the RV64G extensions are supported and so they are omitted from +// this structure. As riscv64 Go programs require at least RV64G, the code that populates +// this structure cannot run successfully if some of the RV64G extensions are missing. +// The struct is padded to avoid false sharing. 
+var RISCV64 struct { + _ CacheLinePad + HasFastMisaligned bool // Fast misaligned accesses + HasC bool // Compressed instruction-set extension + HasV bool // Vector extension compatible with RVV 1.0 + HasZba bool // Address generation instructions extension + HasZbb bool // Basic bit-manipulation extension + HasZbs bool // Single-bit instructions extension + _ CacheLinePad +} + func init() { archInit() initOptions() diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 0e27a21e1f..af2aa99f9f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -38,6 +38,8 @@ func initOptions() { {Name: "dcpop", Feature: &ARM64.HasDCPOP}, {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + {Name: "dit", Feature: &ARM64.HasDIT}, + {Name: "i8mm", Feature: &ARM64.HasI8MM}, } } @@ -145,6 +147,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { ARM64.HasLRCPC = true } + switch extractBits(isar1, 52, 55) { + case 1: + ARM64.HasI8MM = true + } + // ID_AA64PFR0_EL1 switch extractBits(pfr0, 16, 19) { case 0: @@ -168,6 +175,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { parseARM64SVERegister(getzfr0()) } + + switch extractBits(pfr0, 48, 51) { + case 1: + ARM64.HasDIT = true + } } func parseARM64SVERegister(zfr0 uint64) { diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 3d386d0fc2..08f35ea177 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -35,8 +35,10 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 hwcap2_SVE2 = 1 << 1 + hwcap2_I8MM = 1 << 13 ) // linuxKernelCanEmulateCPUID reports whether we're running @@ -106,9 +108,12 @@ func doinit() { ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) ARM64.HasSVE = isSet(hwCap, hwcap_SVE) ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + ARM64.HasDIT = isSet(hwCap, hwcap_DIT) + // HWCAP2 feature bits ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) + ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) } func isSet(hwc uint, value uint) bool { diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index cd63e73355..7d902b6847 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 0000000000..cb4a0c5728 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,137 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. 
+// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. +// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. + +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. 
The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. + if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. +func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index 7f0c79c004..aca3199c91 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -8,4 +8,13 @@ package cpu const cacheLineSize = 64 -func initOptions() {} +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + } +} diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e12..6e08a76a71 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. 
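The new cpu_linux_riscv64.go above prefers the riscv_hwprobe syscall (falling back to HWCAP only for the C extension) and publishes the results through the exported cpu.RISCV64 booleans added to cpu.go earlier in this change. A short usage sketch of those flags for runtime dispatch; sumBytes, sumScalar and sumVector are placeholders, not part of the package:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// sumBytes picks an implementation based on the RISC-V features detected at init
// time by golang.org/x/sys/cpu (riscv_hwprobe on Linux 6.4+, HWCAP otherwise).
func sumBytes(b []byte) int {
	if cpu.RISCV64.HasV && cpu.RISCV64.HasFastMisaligned {
		return sumVector(b) // hypothetical RVV 1.0 fast path
	}
	return sumScalar(b)
}

func sumScalar(b []byte) int {
	s := 0
	for _, v := range b {
		s += int(v)
	}
	return s
}

// sumVector is a stand-in for an assembly or intrinsic implementation;
// here it reuses the scalar loop so the example runs anywhere.
func sumVector(b []byte) int { return sumScalar(b) }

func main() {
	fmt.Println("V:", cpu.RISCV64.HasV, "Zba:", cpu.RISCV64.HasZba, "Zbb:", cpu.RISCV64.HasZbb)
	fmt.Println(sumBytes([]byte{1, 2, 3}))
}
```

Because riscvHWProbe is invoked with an empty CPU set, a feature is only reported when every core implements it, which is what makes this kind of one-time dispatch safe.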
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 4ed2e488b6..ac54ecaba0 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -58,6 +58,7 @@ includes_Darwin=' #define _DARWIN_USE_64_BIT_INODE #define __APPLE_USE_RFC_3542 #include +#include #include #include #include @@ -551,6 +552,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -654,7 +656,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -664,7 +666,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe529d..3a5e776f89 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef2d..6f15ba1eaf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. 
for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a897d..099867deed 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } +//sys renamexNp(from string, to string, flag uint32) (err error) + +func RenamexNp(from string, to string, flag uint32) (err error) { + return renamexNp(from, to, flag) +} + +//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) + +func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + return renameatxNp(fromfd, from, tofd, to, flag) +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { @@ -542,6 +554,55 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. 
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f8e..a6a2d2fc2b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5682e2628a..f08abd434f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPVegasInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr" +// algorithm. 
+// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPBBRInfo)(unsafe.Pointer(&value[0])) + return out, err +} + // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. func GetsockoptString(fd, level, opt int) (string, error) { @@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) { //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) + +func Getrandom(buf []byte, flags int) (n int, err error) { + vdsoRet, supported := vgetrandom(buf, uint32(flags)) + if supported { + if vdsoRet < 0 { + return 0, errnoErr(syscall.Errno(-vdsoRet)) + } + return vdsoRet, nil + } + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags)) + if e != 0 { + return 0, errnoErr(e) + } + return int(r), nil +} + //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) @@ -2592,3 +2653,4 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) +//sys Mseal(b []byte, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c75e..745e5c7e6c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e98451f..dd2262a407 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a288944..8cf3670bda 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error } return riscvHWProbe(pairs, setSize, set, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index b25343c71a..b86ded549c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -293,6 +293,7 @@ func Uname(uname *Utsname) error { //sys Mkfifoat(dirfd int, path string, mode uint32) 
(err error) //sys Mknod(path string, mode uint32, dev int) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8c7..4e92e5aa40 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 0000000000..07ac8e09d1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go new file mode 100644 index 0000000000..297e97bce9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !linux || !go1.24 + +package unix + +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e40fa85245..d73c4652e6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index bb02aa6c05..4a55a40058 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 877a62b479..de3b462489 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -457,6 +457,7 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd + BCACHEFS_SUPER_MAGIC = 0xca451a4e BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -494,6 +495,7 @@ const ( BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ -928,6 +930,7 @@ const ( EPOLL_CTL_ADD = 0x1 EPOLL_CTL_DEL = 0x2 EPOLL_CTL_MOD = 0x3 + EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 ESP_V4_FLOW = 0xa ESP_V6_FLOW = 0xc @@ -941,9 +944,6 @@ const ( ETHTOOL_FEC_OFF = 0x4 ETHTOOL_FEC_RS = 0x8 ETHTOOL_FLAG_ALL = 0x7 - ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 - ETHTOOL_FLAG_OMIT_REPLY = 0x2 - ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_FLASHDEV = 0x33 ETHTOOL_FLASH_MAX_FILENAME = 0x80 ETHTOOL_FWVERS_LEN = 0x20 @@ -1705,6 +1705,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_CRASH_HOTPLUG_SUPPORT = 0x8 KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 @@ 
-1780,6 +1781,7 @@ const ( KEY_SPEC_USER_KEYRING = -0x4 KEY_SPEC_USER_SESSION_KEYRING = -0x5 LANDLOCK_ACCESS_FS_EXECUTE = 0x1 + LANDLOCK_ACCESS_FS_IOCTL_DEV = 0x8000 LANDLOCK_ACCESS_FS_MAKE_BLOCK = 0x800 LANDLOCK_ACCESS_FS_MAKE_CHAR = 0x40 LANDLOCK_ACCESS_FS_MAKE_DIR = 0x80 @@ -1861,6 +1863,19 @@ const ( MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 + MAP_HUGE_16GB = 0x88000000 + MAP_HUGE_16KB = 0x38000000 + MAP_HUGE_16MB = 0x60000000 + MAP_HUGE_1GB = 0x78000000 + MAP_HUGE_1MB = 0x50000000 + MAP_HUGE_256MB = 0x70000000 + MAP_HUGE_2GB = 0x7c000000 + MAP_HUGE_2MB = 0x54000000 + MAP_HUGE_32MB = 0x64000000 + MAP_HUGE_512KB = 0x4c000000 + MAP_HUGE_512MB = 0x74000000 + MAP_HUGE_64KB = 0x40000000 + MAP_HUGE_8MB = 0x5c000000 MAP_HUGE_MASK = 0x3f MAP_HUGE_SHIFT = 0x1a MAP_PRIVATE = 0x2 @@ -1908,6 +1923,7 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2173,7 +2189,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2342,9 +2358,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2417,6 +2435,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2498,6 +2517,23 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PPC_DEXCR_CTRL_CLEAR = 0x4 + PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 + PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 + PR_PPC_DEXCR_CTRL_MASK = 0x1f + PR_PPC_DEXCR_CTRL_SET = 0x2 + PR_PPC_DEXCR_CTRL_SET_ONEXEC = 0x8 + PR_PPC_DEXCR_IBRTPD = 0x1 + PR_PPC_DEXCR_NPHIE = 0x3 + PR_PPC_DEXCR_SBHE = 0x0 + PR_PPC_DEXCR_SRAPD = 0x2 + PR_PPC_GET_DEXCR = 0x48 + PR_PPC_SET_DEXCR = 0x49 + PR_RISCV_CTX_SW_FENCEI_OFF = 0x1 + PR_RISCV_CTX_SW_FENCEI_ON = 0x0 + PR_RISCV_SCOPE_PER_PROCESS = 0x0 + PR_RISCV_SCOPE_PER_THREAD = 0x1 + PR_RISCV_SET_ICACHE_FLUSH_CTX = 0x47 PR_RISCV_V_GET_CONTROL = 0x46 PR_RISCV_V_SET_CONTROL = 0x45 PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 @@ -2902,11 +2938,12 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3179,6 +3216,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3192,8 +3230,10 @@ const ( STATX_MTIME = 0x40 STATX_NLINK = 0x4 STATX_SIZE = 0x200 + STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3592,6 +3632,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff 
--git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index e4bc0bd57c..8aa6d77c01 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -151,9 +153,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 689317afdb..da428f4253 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -151,9 +153,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 5cca668ac3..bf45bfec78 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 14270508b0..71c67162b7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 ESR_MAGIC = 0x45535201 EXTPROC = 0x10000 @@ -152,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 28e39afdcb..9476628fa0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -152,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index cd66e92cb4..b9e85f3cf0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c1595eba78..a48b68a764 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ee9456b0da..ea00e8522a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8cfca81e1b..91c6468717 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + 
EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 60b0deb3af..8cbf38d639 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -150,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f90aa7281b..a2df734191 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -150,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ba9e015033..2479137923 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -150,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 07cdfd6e9f..d265f146ee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 
@@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 2f1dd214a7..3f2d644396 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -148,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f40519d901..5d8b727a1c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -82,6 +82,8 @@ const ( EFD_CLOEXEC = 0x400000 EFD_NONBLOCK = 0x4000 EMT_TAGOVF = 0x1 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x400000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -153,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab3d..1ec2b1407b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f240a..24b346e1a3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd 
int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb28402..ebd213100b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT 
libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997b5..824b9c2d5e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var 
libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1ab7..4f178a2293 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL 
·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 87d8612a1d..af30da5578 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -971,23 +971,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -2229,3 +2212,19 @@ func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mseal(b []byte, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSEAL, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9dc42410b7..1851df14e8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 41b5617316..0b43c69365 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d3a0751cd..e1ec0dbe4e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 4019a656f6..880c6d6e31 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index c39f7776db..7c8452a63e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index ac4af24f90..b8ef95b0fa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -463,6 +463,11 
@@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 57571d072f..2ffdf861f7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index f77d532121..2af3b5c762 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index e62963e67e..1da08d5267 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index fae140b62c..b7a251353b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 00831354c8..6e85b0aac9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 9d1e0ff06d..f15dadf055 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -555,6 +555,12 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mount(SB) + RET +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_nanosleep(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 79029ed584..28b487df25 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index da115f9a4b..1e7f321e43 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 53aef5dc58..524b0820cb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -457,4 +457,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 71d524763d..f485dbf456 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 @@ -379,4 +380,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c747706131..70b35bf3b0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -421,4 +421,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index f96e214f6d..1893e2fe88 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -324,4 +324,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 28425346cf..16a4017da0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 @@ -318,4 +320,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index d0953018da..7e567f1eff 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 295c7f4b81..38ae55e5ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index d1a9eaca7a..55e92e60a8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index bec157c39f..60658d6a02 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 7ee7bdc435..e203e8a7ed 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -448,4 +448,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index fad1f25b44..5944b97d54 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 7d3e16357d..c66d416dad 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 0ed53ad9f7..a5459e766f 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -325,4 +325,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 2fba04ad50..01d86825bb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -386,4 +386,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 621d00d741..7b703e77cd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -399,4 +399,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f3a..d003c3d437 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef74d..0d45a941aa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a3a..51e13eb055 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee77..d002d8ef3c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee8b..3f863d898d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) 
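// Illustrative sketch (not part of the vendored diff): the SYS_MSEAL numbers
// added across the zsysnum_linux_*.go files above back the new unix.Mseal
// wrapper from zsyscall_linux.go. mseal(2) needs Linux 6.10+; on older kernels
// the call is expected to return ENOSYS. Everything below is assumed usage,
// not part of this change.
//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Map one anonymous read-write page.
	mem, err := unix.Mmap(-1, 0, os.Getpagesize(),
		unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANON)
	if err != nil {
		panic(err)
	}

	// Seal the mapping; the flags argument is currently reserved and must be 0.
	if err := unix.Mseal(mem, 0); err != nil {
		fmt.Println("mseal unavailable:", err) // e.g. ENOSYS before Linux 6.10
		return
	}

	// A sealed mapping rejects further permission changes (EPERM expected).
	if err := unix.Mprotect(mem, unix.PROT_READ); err != nil {
		fmt.Println("mprotect after mseal:", err)
	}
}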
type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12acfd..61c7293106 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c7900..b5d17414f0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 4740b83485..3a69e45496 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,30 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - _ [12]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -515,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -556,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -2485,7 +2514,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -3473,7 +3502,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x6 ) type FsverityDigest struct { @@ -3765,7 +3794,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_USER_MAX = 0x2c ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3805,7 +3834,10 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 
ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2c + ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 + ETHTOOL_FLAG_OMIT_REPLY = 0x2 + ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3947,7 +3979,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -3975,7 +4007,7 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x5 + ETHTOOL_A_TSINFO_MAX = 0x6 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4605,7 +4637,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5209,7 +5241,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc04142..ad05b51a60 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 
115341fba6..4e613cf633 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 6f7d2ac70a..b6e1ab76f8 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -894,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1087,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. type TrusteeValue uintptr @@ -1158,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) = advapi32.GetAce // Control returns the security descriptor control bits. 
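// Illustrative sketch (not part of the vendored diff): the ACE_HEADER and
// ACCESS_ALLOWED_ACE types, the now-exported ACL.AceCount field and the GetAce
// wrapper added above can be combined to walk a file's DACL.
// GetNamedSecurityInfo, SE_FILE_OBJECT, DACL_SECURITY_INFORMATION and
// SECURITY_DESCRIPTOR.DACL already existed in x/sys/windows; the path is a
// placeholder.
//go:build windows

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	sd, err := windows.GetNamedSecurityInfo(`C:\Windows\notepad.exe`,
		windows.SE_FILE_OBJECT, windows.DACL_SECURITY_INFORMATION)
	if err != nil {
		panic(err)
	}
	dacl, _, err := sd.DACL()
	if err != nil {
		panic(err)
	}
	if dacl == nil {
		return // NULL DACL: no ACEs to enumerate
	}

	for i := uint32(0); i < uint32(dacl.AceCount); i++ {
		var ace *windows.ACCESS_ALLOWED_ACE
		if err := windows.GetAce(dacl, i, &ace); err != nil {
			continue
		}
		if ace.Header.AceType != windows.ACCESS_ALLOWED_ACE_TYPE {
			continue // only the access-allowed layout is declared above
		}
		// The trustee SID is stored inline, starting at SidStart.
		sid := (*windows.SID)(unsafe.Pointer(&ace.SidStart))
		fmt.Printf("allow mask %#x for %s\n", ace.Mask, sid.String())
	}
}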
func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6525c62f3c..5cee9a3143 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -17,8 +17,10 @@ import ( "unsafe" ) -type Handle uintptr -type HWND uintptr +type ( + Handle uintptr + HWND uintptr +) const ( InvalidHandle = ^Handle(0) @@ -211,6 +213,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId +//sys LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) [failretval==0] = user32.LoadKeyboardLayoutW +//sys UnloadKeyboardLayout(hkl Handle) (err error) = user32.UnloadKeyboardLayout +//sys GetKeyboardLayout(tid uint32) (hkl Handle) = user32.GetKeyboardLayout +//sys ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) = user32.ToUnicodeEx //sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow //sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW //sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx @@ -307,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole @@ -1368,9 +1378,11 @@ func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) } + func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) } + func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index d8cb71db0a..7b97a154c9 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ 
b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 @@ -2003,7 +2004,21 @@ const ( MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 ) -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 +// Flags for GetAdaptersAddresses, see +// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses. +const ( + GAA_FLAG_SKIP_UNICAST = 0x1 + GAA_FLAG_SKIP_ANYCAST = 0x2 + GAA_FLAG_SKIP_MULTICAST = 0x4 + GAA_FLAG_SKIP_DNS_SERVER = 0x8 + GAA_FLAG_INCLUDE_PREFIX = 0x10 + GAA_FLAG_SKIP_FRIENDLY_NAME = 0x20 + GAA_FLAG_INCLUDE_WINS_INFO = 0x40 + GAA_FLAG_INCLUDE_GATEWAYS = 0x80 + GAA_FLAG_INCLUDE_ALL_INTERFACES = 0x100 + GAA_FLAG_INCLUDE_ALL_COMPARTMENTS = 0x200 + GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER = 0x400 +) const ( IF_TYPE_OTHER = 1 @@ -2017,6 +2032,50 @@ const ( IF_TYPE_IEEE1394 = 144 ) +// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin +const ( + IpPrefixOriginOther = 0 + IpPrefixOriginManual = 1 + IpPrefixOriginWellKnown = 2 + IpPrefixOriginDhcp = 3 + IpPrefixOriginRouterAdvertisement = 4 + IpPrefixOriginUnchanged = 1 << 4 +) + +// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin +const ( + NlsoOther = 0 + NlsoManual = 1 + NlsoWellKnown = 2 + NlsoDhcp = 3 + NlsoLinkLayerAddress = 4 + NlsoRandom = 5 + IpSuffixOriginOther = 0 + IpSuffixOriginManual = 1 + IpSuffixOriginWellKnown = 2 + IpSuffixOriginDhcp = 3 + IpSuffixOriginLinkLayerAddress = 4 + IpSuffixOriginRandom = 5 + IpSuffixOriginUnchanged = 1 << 4 +) + +// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state +const ( + NldsInvalid = 0 + NldsTentative = 1 + NldsDuplicate = 2 + NldsDeprecated = 3 + NldsPreferred = 4 + IpDadStateInvalid = 0 + IpDadStateTentative = 1 + IpDadStateDuplicate = 2 + IpDadStateDeprecated = 3 + IpDadStatePreferred = 4 +) + type SocketAddress struct { Sockaddr *syscall.RawSockaddrAny SockaddrLength int32 @@ -3404,3 +3463,14 @@ type DCB struct { EvtChar byte wReserved1 uint16 } + +// Keyboard Layout Flags. 
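// Illustrative sketch (not part of the vendored diff): the new user32 wrappers
// declared earlier (LoadKeyboardLayout, GetKeyboardLayout, ToUnicodeEx,
// UnloadKeyboardLayout) plus the KLF_* flags listed just below can translate a
// virtual-key code under a chosen layout. The KLID "00000409" (US English) and
// the virtual key 0x41 ('A') are illustrative values.
//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Default to the active layout of the current thread (tid 0).
	hkl := windows.GetKeyboardLayout(0)

	// Optionally activate a specific layout by its KLID string.
	if name, err := windows.UTF16PtrFromString("00000409"); err == nil {
		if loaded, lerr := windows.LoadKeyboardLayout(name, windows.KLF_ACTIVATE); lerr == nil {
			hkl = loaded
			defer windows.UnloadKeyboardLayout(loaded)
		}
	}

	var state [256]byte // keyboard state: no modifiers pressed
	var buf [8]uint16   // UTF-16 output buffer
	if n := windows.ToUnicodeEx(0x41, 0, &state[0], &buf[0], int32(len(buf)), 0, hkl); n > 0 {
		fmt.Println(windows.UTF16ToString(buf[:n])) // "a" for an unshifted key state
	}
}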
+// See https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-loadkeyboardlayoutw +const ( + KLF_ACTIVATE = 0x00000001 + KLF_SUBSTITUTE_OK = 0x00000002 + KLF_REORDER = 0x00000008 + KLF_REPLACELANG = 0x00000010 + KLF_NOTELLSHELL = 0x00000080 + KLF_SETFORPROCESS = 0x00000100 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9f73df75b5..4c2e1bdc01 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -246,7 +247,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -346,8 +349,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -477,12 +482,16 @@ var ( procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") + procGetKeyboardLayout = moduser32.NewProc("GetKeyboardLayout") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") procIsWindow = moduser32.NewProc("IsWindow") procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procLoadKeyboardLayoutW = moduser32.NewProc("LoadKeyboardLayoutW") procMessageBoxW = moduser32.NewProc("MessageBoxW") + procToUnicodeEx = moduser32.NewProc("ToUnicodeEx") + procUnloadKeyboardLayout = moduser32.NewProc("UnloadKeyboardLayout") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") @@ -788,6 +797,14 @@ func FreeSid(sid *SID) (err error) { return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { + r1, _, e1 := 
syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetLengthSid(sid *SID) (len uint32) { r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) len = uint32(r0) @@ -2149,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2157,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3025,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3041,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { @@ -4073,6 +4124,12 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { return } +func GetKeyboardLayout(tid uint32) (hkl Handle) { + r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + hkl = Handle(r0) + return +} + func GetShellWindow() (shellWindow HWND) { r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) shellWindow = HWND(r0) @@ -4106,6 +4163,15 @@ func IsWindowVisible(hwnd HWND) (isVisible bool) { return } +func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + hkl = Handle(r0) + if hkl == 0 { + err = errnoErr(e1) + } + return +} + func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) ret = int32(r0) @@ -4115,6 +4181,20 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i return } +func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { + r0, _, _ := 
syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + ret = int32(r0) + return +} + +func UnloadKeyboardLayout(hkl Handle) (err error) { + r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { var _p0 uint32 if inheritExisting { diff --git a/vendor/golang.org/x/term/LICENSE b/vendor/golang.org/x/term/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/term/LICENSE +++ b/vendor/golang.org/x/term/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index 465f560604..df6bf948e1 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -26,6 +26,7 @@ func makeRaw(fd int) (*State, error) { return nil, err } raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err } diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/text/LICENSE +++ b/vendor/golang.org/x/text/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/text/internal/catmsg/codec.go b/vendor/golang.org/x/text/internal/catmsg/codec.go index 49c9fc9789..547802b0f3 100644 --- a/vendor/golang.org/x/text/internal/catmsg/codec.go +++ b/vendor/golang.org/x/text/internal/catmsg/codec.go @@ -257,7 +257,7 @@ func (d *Decoder) setError(err error) { // Language returns the language in which the message is being rendered. // // The destination language may be a child language of the language used for -// encoding. For instance, a decoding language of "pt-PT"" is consistent with an +// encoding. 
For instance, a decoding language of "pt-PT" is consistent with an // encoding language of "pt". func (d *Decoder) Language() language.Tag { return d.tag } diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/time/LICENSE +++ b/vendor/golang.org/x/time/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f493f..93a798ab63 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 1fc1de0bd1..0e0ba4c035 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -73,6 +73,15 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // check, Preorder is almost twice as fast as Nodes. The two // features seem to contribute similar slowdowns (~1.4x each). + // This function is equivalent to the PreorderSeq call below, + // but to avoid the additional dynamic call (which adds 13-35% + // to the benchmarks), we expand it out. 
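For context on the golang.org/x/time/rate/rate.go hunk above: as the diff shows, NewLimiter now fills the token bucket to the burst size at construction time, and reserveN no longer special-cases limit == 0. The sketch below is illustrative only and not part of the vendored code; it assumes nothing beyond the public rate API (rate.NewLimiter, rate.Every, Limiter.Allow), and the chosen rate and burst values are arbitrary.

    package main

    import (
    	"fmt"
    	"time"

    	"golang.org/x/time/rate"
    )

    func main() {
    	// 1 event per 100ms, burst of 3. Per the constructor change above, the
    	// limiter starts with a full bucket of 3 tokens.
    	lim := rate.NewLimiter(rate.Every(100*time.Millisecond), 3)

    	for i := 0; i < 5; i++ {
    		// The first three calls drain the bucket and return true; the
    		// remaining calls return false until tokens refill at the rate limit.
    		fmt.Printf("call %d allowed: %v\n", i, lim.Allow())
    	}
    }

A freshly constructed limiter therefore permits an immediate burst of up to b events, after which calls are throttled to the configured rate.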
+ // + // in.PreorderSeq(types...)(func(n ast.Node) bool { + // f(n) + // return true + // }) + mask := maskOf(types) for i := 0; i < len(in.events); { ev := in.events[i] diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go new file mode 100644 index 0000000000..b7e959114c --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package inspector + +import ( + "go/ast" + "iter" +) + +// PreorderSeq returns an iterator that visits all the +// nodes of the files supplied to New in depth-first order. +// It visits each node n before n's children. +// The complete traversal sequence is determined by ast.Inspect. +// +// The types argument, if non-empty, enables type-based +// filtering of events: only nodes whose type matches an +// element of the types slice are included in the sequence. +func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { + + // This implementation is identical to Preorder, + // except that it supports breaking out of the loop. + + return func(yield func(ast.Node) bool) { + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// All[N] returns an iterator over all the nodes of type N. +// N must be a pointer-to-struct type that implements ast.Node. +// +// Example: +// +// for call := range All[*ast.CallExpr](in) { ... } +func All[N interface { + *S + ast.Node +}, S any](in *Inspector) iter.Seq[N] { + + // To avoid additional dynamic call overheads, + // we duplicate rather than call the logic of PreorderSeq. + + mask := typeOf((N)(nil)) + return func(yield func(N) bool) { + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node.(N)) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 636edb460a..aa69fb4d50 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -719,6 +719,8 @@ type PythonSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. + ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` } func (x *PythonSettings) Reset() { @@ -760,6 +762,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + // Settings for Node client libraries. 
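For context on the new vendor/golang.org/x/tools/go/ast/inspector/iter.go file above: it adds Go 1.23 iterator support (PreorderSeq and the generic All) to the inspector, whose point, per its own comment, is that the loop can be exited early. The sketch below is a usage illustration only, assuming the API shown above plus the standard go/parser and go/token packages; the file name and parsed source string are invented for the example, and it requires Go 1.23 (range-over-func and the iter package), matching the go1.23 build tag on iter.go.

    //go:build go1.23

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    	"go/token"

    	"golang.org/x/tools/go/ast/inspector"
    )

    func main() {
    	fset := token.NewFileSet()
    	f, err := parser.ParseFile(fset, "example.go", `package p
    func f() { println(g(1), g(2)) }
    func g(x int) int { return x }
    `, 0)
    	if err != nil {
    		panic(err)
    	}

    	in := inspector.New([]*ast.File{f})

    	// Range over every call expression via the generic All iterator.
    	for call := range inspector.All[*ast.CallExpr](in) {
    		fmt.Println("call at", fset.Position(call.Pos()))
    	}

    	// PreorderSeq supports early exit: stop after the first function declaration.
    	for n := range in.PreorderSeq((*ast.FuncDecl)(nil)) {
    		fmt.Println("first func:", n.(*ast.FuncDecl).Name.Name)
    		break
    	}
    }

Both loops can break out early, which is the capability the iterator form adds over the existing Preorder callback API.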
type NodeSettings struct { state protoimpl.MessageState @@ -1024,6 +1033,13 @@ type MethodSettings struct { // The fully qualified name of the method, for which the options below apply. // This is used to find the method to apply the options. + // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Describes settings to use for long-running operations when generating // API methods for RPCs. Complements RPCs that use the annotations in @@ -1033,15 +1049,12 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.cloud.speech.v2.Speech.BatchRecognize - // long_running: - // initial_poll_delay: - // seconds: 60 # 1 minute - // poll_delay_multiplier: 1.5 - // max_poll_delay: - // seconds: 360 # 6 minutes - // total_poll_timeout: - // seconds: 54000 # 90 minutes + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` // List of top-level fields of the request message, that should be // automatically populated by the client libraries based on their @@ -1051,9 +1064,9 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.example.v1.ExampleService.CreateExample - // auto_populated_fields: - // - request_id + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` } @@ -1110,6 +1123,60 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. + // This feature will be enabled by default 1 month after launching the + // feature in preview packages. 
+ RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. // All default values below are from those used in the client library @@ -1138,7 +1205,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1151,7 +1218,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1456,132 +1523,143 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, - 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 
0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, + 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, + 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, + 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, + 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 
0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, - 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x61, 0x70, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, - 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, - 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, - 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, - 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, - 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, + 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, + 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, + 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, - 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, - 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, - 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, - 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, - 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, - 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, - 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, - 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 
0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, - 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, - 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, - 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, + 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, + 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 
0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, + 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1597,34 +1675,35 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_google_api_client_proto_goTypes = []interface{}{ - (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization - (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - 
(*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - nil, // 15: google.api.DotnetSettings.RenamedServicesEntry - nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 18: google.api.LaunchStage - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures + nil, // 16: google.api.DotnetSettings.RenamedServicesEntry + nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 19: google.api.LaunchStage + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings @@ -1641,25 +1720,26 @@ var file_google_api_client_proto_depIdxs = []int32{ 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 16: google.api.PhpSettings.common:type_name -> 
google.api.CommonLanguageSettings 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 32, // [32:32] is the sub-list for method output_type - 32, // [32:32] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 28, // [28:32] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 29, // [29:33] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1812,7 +1892,19 @@ func 
file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1831,7 +1923,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 16, + NumMessages: 17, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index d339dfb02a..a462e7d013 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -121,6 +121,11 @@ type FieldInfo struct { // any API consumer, just documents the API's format for the field it is // applied to. Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` + // The type(s) that the annotated, generic field may represent. + // + // Currently, this must only be used on fields of type `google.protobuf.Any`. + // Supporting other generic types may be considered in the future. + ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"` } func (x *FieldInfo) Reset() { @@ -162,6 +167,70 @@ func (x *FieldInfo) GetFormat() FieldInfo_Format { return FieldInfo_FORMAT_UNSPECIFIED } +func (x *FieldInfo) GetReferencedTypes() []*TypeReference { + if x != nil { + return x.ReferencedTypes + } + return nil +} + +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo]. +type TypeReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the type that the annotated, generic field may represent. + // If the type is in the same protobuf package, the value can be the simple + // message name e.g., `"MyMessage"`. Otherwise, the value must be the + // fully-qualified message name e.g., `"google.library.v1.Book"`. + // + // If the type(s) are unknown to the service (e.g. the field accepts generic + // user input), use the wildcard `"*"` to denote this behavior. + // + // See [AIP-202](https://google.aip.dev/202#type-references) for more details. 
+ TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *TypeReference) Reset() { + *x = TypeReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReference) ProtoMessage() {} + +func (x *TypeReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead. +func (*TypeReference) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{1} +} + +func (x *TypeReference) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FieldOptions)(nil), @@ -185,6 +254,13 @@ var ( // string actual_ip_address = 4 [ // (google.api.field_info).format = IPV4_OR_IPV6 // ]; + // google.protobuf.Any generic_field = 5 [ + // (google.api.field_info).referenced_types = {type_name: "ActualType"}, + // (google.api.field_info).referenced_types = {type_name: "OtherType"}, + // ]; + // google.protobuf.Any generic_user_input = 5 [ + // (google.api.field_info).referenced_types = {type_name: "*"}, + // ]; // // optional google.api.FieldInfo field_info = 291403980; E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] @@ -197,30 +273,37 @@ var file_google_api_field_info_proto_rawDesc = []byte{ 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, - 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, - 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, - 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, + 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -236,21 
+319,23 @@ func file_google_api_field_info_proto_rawDescGZIP() []byte { } var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_google_api_field_info_proto_goTypes = []interface{}{ (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format (*FieldInfo)(nil), // 1: google.api.FieldInfo - (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions + (*TypeReference)(nil), // 2: google.api.TypeReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions } var file_google_api_field_info_proto_depIdxs = []int32{ 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format - 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions - 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 2, // [2:3] is the sub-list for extension type_name - 1, // [1:2] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference + 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_api_field_info_proto_init() } @@ -271,6 +356,18 @@ func file_google_api_field_info_proto_init() { return nil } } + file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -278,7 +375,7 @@ func file_google_api_field_info_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_field_info_proto_rawDesc, NumEnums: 1, - NumMessages: 1, + NumMessages: 2, NumExtensions: 1, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 76ea76df33..ffb5838cb1 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -102,7 +102,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { return false } -// # gRPC Transcoding +// gRPC Transcoding // // gRPC Transcoding is a feature for mapping between a gRPC method and one or // more HTTP REST endpoints. 
It allows developers to build a single API service @@ -143,9 +143,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables an HTTP REST to gRPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` // // Any fields in the request message which are not bound by the path template // automatically become HTTP query parameters if there is no HTTP request body. @@ -169,11 +168,9 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables a HTTP JSON to RPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` // // Note that fields which are mapped to URL query parameters must have a // primitive type or a repeated primitive type or a non-repeated message type. @@ -203,10 +200,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // representation of the JSON in the request body is determined by // protos JSON encoding: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the @@ -228,10 +223,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // The following HTTP JSON to RPC mapping is enabled: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in @@ -259,13 +252,13 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables the following two alternative HTTP JSON to RPC mappings: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` // -// ## Rules for HTTP mapping +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping // // 1. Leaf request fields (recursive expansion nested messages in the request // message) are classified into three categories: @@ -284,7 +277,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body, all // fields are passed via URL path and URL query parameters. // -// ### Path template syntax +// Path template syntax // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; @@ -323,7 +316,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // Document](https://developers.google.com/discovery/v1/reference/apis) as // `{+var}`. 
// -// ## Using gRPC API Service Configuration +// # Using gRPC API Service Configuration // // gRPC API Service Configuration (service config) is a configuration language // for configuring a gRPC service to become a user-facing product. The @@ -338,15 +331,14 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specified in the service config will override any matching transcoding // configuration in the proto. // -// Example: +// The following example selects a gRPC method and applies an `HttpRule` to it: // // http: // rules: -// # Selects a gRPC method and applies HttpRule to it. // - selector: example.v1.Messaging.GetMessage // get: /v1/messages/{message_id}/{sub.subfield} // -// ## Special notes +// # Special notes // // When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the // proto to JSON conversion must follow the [proto3 diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 7a3fd93fcd..b5db279aeb 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -253,8 +253,13 @@ type ResourceDescriptor struct { History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` // The plural name used in the resource name and permission names, such as // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - // concept of the `plural` field in k8s CRD spec + // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception + // to this is for Nested Collections that have stuttering names, as defined + // in [AIP-122](https://google.aip.dev/122#nested-collections), where the + // collection ID in the resource name pattern does not necessarily directly + // match the `plural` value. + // + // It is the same concept of the `plural` field in k8s CRD spec // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ // // Note: The plural form is required even for singleton resources. See diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go index 6b867a46ed..c90c6015d2 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go @@ -1105,25 +1105,66 @@ func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { // messages `has(m.x)` is defined as 'defined, but not set`. For proto3, the // macro tests whether the property is set to its default. For map and struct // types, the macro tests whether the property `x` is defined on `m`. 
+// +// Comprehensions for the standard environment macros evaluation can be best +// visualized as the following pseudocode: +// +// ``` +// let `accu_var` = `accu_init` +// +// for (let `iter_var` in `iter_range`) { +// if (!`loop_condition`) { +// break +// } +// `accu_var` = `loop_step` +// } +// +// return `result` +// ``` +// +// Comprehensions for the optional V2 macros which support map-to-map +// translation differ slightly from the standard environment macros in that +// they expose both the key or index in addition to the value for each list +// or map entry: +// +// ``` +// let `accu_var` = `accu_init` +// +// for (let `iter_var`, `iter_var2` in `iter_range`) { +// if (!`loop_condition`) { +// break +// } +// `accu_var` = `loop_step` +// } +// +// return `result` +// ``` type Expr_Comprehension struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The name of the iteration variable. + // The name of the first iteration variable. + // When the iter_range is a list, this variable is the list element. + // When the iter_range is a map, this variable is the map entry key. IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` - // The range over which var iterates. + // The name of the second iteration variable, empty if not set. + // When the iter_range is a list, this variable is the integer index. + // When the iter_range is a map, this variable is the map entry value. + // This field is only set for comprehension v2 macros. + IterVar2 string `protobuf:"bytes,8,opt,name=iter_var2,json=iterVar2,proto3" json:"iter_var2,omitempty"` + // The range over which the comprehension iterates. IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` // The name of the variable used for accumulation of the result. AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` // The initial value of the accumulator. AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` - // An expression which can contain iter_var and accu_var. + // An expression which can contain iter_var, iter_var2, and accu_var. // // Returns false when the result has been computed and may be used as // a hint to short-circuit the remainder of the comprehension. LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` - // An expression which can contain iter_var and accu_var. + // An expression which can contain iter_var, iter_var2, and accu_var. // // Computes the next value of accu_var. 
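The comprehension structure documented above is what CEL's standard macros (`all`, `exists`, `map`, `filter`) are expanded into by the parser. As a rough sketch only, the `cel-go` evaluator (an indirect dependency of this module) can be used to compile and run such a macro; the expression and expected result here are illustrative, not part of this change.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv()
	if err != nil {
		panic(err)
	}
	// The all() macro below is expanded by the parser into a comprehension
	// of the shape described in the Expr_Comprehension documentation.
	ast, iss := env.Compile(`[1, 2, 3].all(x, x > 0)`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Value()) // true
}
```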
LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` @@ -1172,6 +1213,13 @@ func (x *Expr_Comprehension) GetIterVar() string { return "" } +func (x *Expr_Comprehension) GetIterVar2() string { + if x != nil { + return x.IterVar2 + } + return "" +} + func (x *Expr_Comprehension) GetIterRange() *Expr { if x != nil { return x.IterRange @@ -1485,7 +1533,7 @@ var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{ 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xae, 0x0d, 0x0a, 0x04, 0x45, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xcb, 0x0d, 0x0a, 0x04, 0x45, 0x78, 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, @@ -1567,132 +1615,134 @@ var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{ 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xfd, - 0x02, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x72, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0x9a, + 0x03, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x3d, 0x0a, 0x0a, 0x69, - 0x74, 0x65, 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, - 0x09, 0x69, 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, - 0x63, 0x75, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, - 0x63, 0x75, 0x56, 0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, - 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, - 0x69, 0x74, 0x12, 0x45, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, - 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 
0x0a, 0x09, 0x6c, 0x6f, 0x6f, - 0x70, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, + 0x28, 0x09, 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x69, + 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x32, 0x12, 0x3d, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, - 0x6f, 0x70, 0x53, 0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, - 0x0a, 0x09, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, - 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, - 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, - 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, - 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, - 0x00, 0x52, 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, - 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, - 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, - 0x18, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 
0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, - 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x8c, 0x07, 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, - 0x0a, 0x0e, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, 0x74, + 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, 0x5f, + 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, 0x56, + 0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, 0x12, + 0x45, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, - 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x4e, - 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x80, - 0x03, 0x0a, 0x09, 
0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x69, 0x0a, 0x13, - 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x73, + 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, - 0x65, 0x6e, 0x74, 0x52, 0x12, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, - 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, - 0x22, 0x6f, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, - 0x15, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, - 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, - 0x0a, 0x16, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, - 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, - 0x03, 0x1a, 0x3c, 0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x5d, 0x0a, 0x0f, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, 0x53, + 0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, - 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, - 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, - 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, - 0x6d, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x42, 0x6e, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, + 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, + 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, + 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, 0x48, + 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x8c, 0x07, 0x0a, + 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, 0x73, + 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, + 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, 0x61, + 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0a, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x80, 0x03, 0x0a, 0x09, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x69, 0x0a, 0x13, 0x61, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x52, 0x12, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x42, 0x0b, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, + 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, + 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, + 0x4e, 0x54, 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, + 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, + 0x45, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, + 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x1a, 0x3c, + 0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5d, 0x0a, 0x0f, + 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, 0x0a, 0x0e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 
0x75, 0x6d, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0x6e, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x53, + 0x79, 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 6a8a07781a..5d4096d46a 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,28 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. -- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index ab0fbb79b8..b572707c62 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Prerequisites -- **[Go][]**: any one of the **three latest major** [releases][go-releases]. 
+- **[Go][]**: any one of the **two latest major** [releases][go-releases]. ## Installation diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md index be6e108705..abab279379 100644 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go index 0787d0b50c..d7b40b7cb6 100644 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // // This should be useful for callers who want to configure backoff with diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f391744f72..b181f386a1 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" @@ -72,8 +73,21 @@ func unregisterForTesting(name string) { delete(m, name) } +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + func init() { internal.BalancerUnregister = unregisterForTesting + internal.ConnectedAddress = connectedAddress + internal.SetConnectedAddress = setConnectedAddress } // Get returns the resolver builder registered with the given name. @@ -243,6 +257,10 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -410,6 +428,9 @@ type SubConnState struct { // ConnectionError is set if the ConnectivityState is TransientFailure, // describing the reason the SubConn failed. Otherwise, it is nil. ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. 
+ connectedAddress resolver.Address } // ClientConnState describes the state of a ClientConn relevant to the diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec8e..2b87bd79c7 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go similarity index 87% rename from vendor/google.golang.org/grpc/pickfirst.go rename to vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 8853626614..4d69b4052f 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,38 +16,48 @@ * */ -package grpc +// Package pickfirst contains the pick_first load balancing policy. +package pickfirst import ( "encoding/json" "errors" "fmt" + "math/rand" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) +func init() { + balancer.Register(pickfirstBuilder{}) + internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } +} + +var logger = grpclog.Component("pick-first-lb") + const ( - // PickFirstBalancerName is the name of the pick_first balancer. - PickFirstBalancerName = "pick_first" - logPrefix = "[pick-first-lb %p] " + // Name is the name of the pick_first balancer. + Name = "pick_first" + logPrefix = "[pick-first-lb %p] " ) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } func (pickfirstBuilder) Name() string { - return PickFirstBalancerName + return Name } type pfConfig struct { @@ -93,6 +103,12 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +type Shuffler interface { + ShuffleAddressListForTesting(n int, swap func(i, j int)) +} + +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } + func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // The resolver reported an empty address list. Treat it like an error by @@ -124,7 +140,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. 
- A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - grpcrand.Shuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each @@ -139,13 +155,13 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forwarrd the corresponding correct endpoints down/split + // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. addrs = state.ResolverState.Addresses if cfg.ShuffleAddressList { addrs = append([]resolver.Address{}, addrs...) - grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } } diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index f7031ad225..260255d31b 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,12 +22,12 @@ package roundrobin import ( + "math/rand" "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" ) // Name is the name of round_robin balancer. @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(grpcrand.Intn(len(scs))), + next: uint32(rand.Intn(len(scs))), } } diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index af39b8a4c7..8ad6ce2f09 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -25,12 +25,15 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the @@ -79,6 +82,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParent: cc.channelz, Target: cc.parsedTarget, + MetricsRecorder: cc.metricsRecorderList, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -92,7 +96,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { // it is safe to call into the balancer here. 
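The pick_first change above keeps the same shuffle pattern while moving from the removed `grpcrand` package to `math/rand`: the resolver-provided slice is copied before shuffling so the caller's state is never mutated. A minimal, hypothetical sketch of that pattern in isolation (not the balancer's actual code path):

```go
package main

import (
	"fmt"
	"math/rand"
)

// shuffled is a hypothetical helper that returns a shuffled copy of addrs,
// mirroring the copy-then-rand.Shuffle pattern used by pick_first above.
func shuffled(addrs []string) []string {
	out := append([]string{}, addrs...) // copy so the caller's slice is not mutated
	rand.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] })
	return out
}

func main() {
	in := []string{"10.0.0.1:443", "10.0.0.2:443", "10.0.0.3:443"}
	fmt.Println(shuffled(in))
	fmt.Println(in) // original order preserved
}
```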
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { errCh := make(chan error) - ok := ccb.serializer.Schedule(func(ctx context.Context) { + uccs := func(ctx context.Context) { defer close(errCh) if ctx.Err() != nil || ccb.balancer == nil { return @@ -107,17 +111,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat logger.Infof("error from balancer.UpdateClientConnState: %v", err) } errCh <- err - }) - if !ok { - return nil } + onFailure := func() { close(errCh) } + + // UpdateClientConnState can race with Close, and when the latter wins, the + // serializer is closed, and the attempt to schedule the callback will fail. + // It is acceptable to ignore this failure. But since we want to handle the + // state update in a blocking fashion (when we successfully schedule the + // callback), we have to use the ScheduleOr method and not the MaybeSchedule + // method on the serializer. + ccb.serializer.ScheduleOr(uccs, onFailure) return <-errCh } // resolverError is invoked by grpc to push a resolver error to the underlying // balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -133,7 +143,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.closed = true ccb.mu.Unlock() channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") - ccb.serializer.Schedule(func(context.Context) { + ccb.serializer.TrySchedule(func(context.Context) { if ccb.balancer == nil { return } @@ -145,7 +155,7 @@ func (ccb *ccBalancerWrapper) close() { // exitIdle invokes the balancer's exitIdle method in the serializer. func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -182,7 +192,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return acbw, nil } -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { // The graceful switch balancer will never call this. logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } @@ -198,6 +208,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.cc.mu.Lock() defer ccb.cc.mu.Unlock() + if ccb.cc.conns == nil { + // The CC has been closed; ignore this update. + return + } ccb.mu.Lock() if ccb.closed { @@ -248,15 +262,29 @@ type acBalancerWrapper struct { // updateState is invoked by grpc to push a subConn state update to the // underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. 
- acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) + acbw.ac.mu.Lock() + defer acbw.ac.mu.Unlock() + if s == connectivity.Ready { + // When changing states to READY, reset stateReadyChan. Wait until + // after we notify the LB policy's listener(s) in order to prevent + // ac.getTransport() from unblocking before the LB policy starts + // tracking the subchannel as READY. + close(acbw.ac.stateReadyChan) + acbw.ac.stateReadyChan = make(chan struct{}) + } }) } @@ -314,8 +342,8 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference. diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 1afb1e84ac..55bffaa77e 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GrpcLogEntry); i { case 0: return &v.state @@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientHeader); i { case 0: return &v.state @@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerHeader); i { case 0: return &v.state @@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + 
file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Trailer); i { case 0: return &v.state @@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state @@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Metadata); i { case 0: return &v.state @@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MetadataEntry); i { case 0: return &v.state @@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Address); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { } } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 2359f94b8a..9c8850e3fd 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "slices" "strings" "sync" "sync/atomic" @@ -31,6 +32,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" @@ -38,6 +40,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -72,6 +75,8 @@ var ( // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = pickfirst.Name ) // The following errors are returned from Dial and DialContext @@ -152,6 +157,16 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) for _, opt := range opts { opt.apply(&cc.dopts) } + + // Determine the resolver to use. 
+ if err := cc.initParsedTargetAndResolverBuilder(); err != nil { + return nil, err + } + + for _, opt := range globalPerTargetDialOptions { + opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts) + } + chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) @@ -160,7 +175,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } if cc.dopts.defaultServiceConfigRawJSON != nil { - scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts) if scpr.Err != nil { return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) } @@ -168,30 +183,24 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } cc.mkp = cc.dopts.copts.KeepaliveParams - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - // TODO: Ideally it should be impossible to error from this function after - // channelz registration. This will require removing some channelz logs - // from the following functions that can error. Errors can be returned to - // the user, and successful logs can be emitted here, after the checks have - // passed and channelz is subsequently registered. - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) - return nil, err - } - if err = cc.determineAuthority(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) + if err = cc.initAuthority(); err != nil { return nil, err } + // Register ClientConn with channelz. Note that this is only done after + // channel creation cannot fail. + cc.channelzRegistration(target) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget) + channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil } @@ -586,13 +595,14 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - idlenessMgr *idle.Manager + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -622,11 +632,6 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. 
A true value is returned in former case and false in latter. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -641,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } @@ -692,8 +692,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { var emptyServiceConfig *ServiceConfig func init() { - balancer.Register(pickfirstBuilder{}) - cfg := parseServiceConfig("{}") + cfg := parseServiceConfig("{}", defaultMaxCallAttempts) if cfg.Err != nil { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } @@ -809,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { cc.csMgr.updateState(connectivity.TransientFailure) } -// Makes a copy of the input addresses slice and clears out the balancer -// attributes field. Addresses are passed during subconn creation and address -// update operations. In both cases, we will clear the balancer attributes by -// calling this function, and therefore we will be able to use the Equal method -// provided by the resolver.Address type for comparison. -func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { +// Makes a copy of the input addresses slice. Addresses are passed during +// subconn creation and address update operations. +func copyAddresses(in []resolver.Address) []resolver.Address { out := make([]resolver.Address, len(in)) - for i := range in { - out[i] = in[i] - out[i].BalancerAttributes = nil - } + copy(out, in) return out } @@ -832,14 +825,14 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. } ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddressesWithoutBalancerAttributes(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), + stateReadyChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -915,28 +908,29 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - ac.mu.Unlock() - ac.resetTransport() + ac.resetTransportAndUnlock() return nil } -func equalAddresses(a, b []resolver.Address) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if !v.Equal(b[i]) { - return false - } - } - return true +// equalAddressIgnoringBalAttributes returns true is a and b are considered equal. +// This is different from the Equal method on the resolver.Address type which +// considers all fields to determine equality. Here, we only consider fields +// that are meaningful to the subConn. 
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} + +func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool { + return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) }) } // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - addrs = copyAddressesWithoutBalancerAttributes(addrs) + addrs = copyAddresses(addrs) limit := len(addrs) if limit > 5 { limit = 5 @@ -944,7 +938,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) ac.mu.Lock() - if equalAddresses(ac.addrs, addrs) { + if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) { ac.mu.Unlock() return } @@ -963,7 +957,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { // Try to find the connected address. for _, a := range addrs { a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { + if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) { // We are connected to a valid address, so do nothing but // update the addresses. ac.mu.Unlock() @@ -989,11 +983,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.updateConnectivityState(connectivity.Idle, nil) } - ac.mu.Unlock() - // Since we were connecting/connected, we should start a new connection // attempt. - go ac.resetTransport() + go ac.resetTransportAndUnlock() } // getServerName determines the serverName to be used in the connection @@ -1187,8 +1179,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State + stateReadyChan chan struct{} // closed and recreated on every READY state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1201,9 +1193,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1211,7 +1200,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.acbw.updateState(s, lastErr) + ac.acbw.updateState(s, ac.curAddr, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1228,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - ac.mu.Lock() +// resetTransportAndUnlock unconditionally connects the addrConn. +// +// ac.mu must be held by the caller, and this function will guarantee it is released. 
+func (ac *addrConn) resetTransportAndUnlock() { acCtx := ac.ctx if acCtx.Err() != nil { ac.mu.Unlock() @@ -1519,7 +1510,7 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { for ctx.Err() == nil { ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateChan + t, state, sc := ac.transport, ac.state, ac.stateReadyChan ac.mu.Unlock() if state == connectivity.Ready { return t, nil @@ -1582,7 +1573,7 @@ func (ac *addrConn) tearDown(err error) { } else { // Hard close the transport when the channel is entering idle or is // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancelation of cc.ctx. + // closing of transports is also taken care of by cancellation of cc.ctx. // But in the case where the channel is entering idle, we need to // explicitly close the transports here. Instead of distinguishing // between these two cases, it is simpler to close the transport @@ -1673,22 +1664,19 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -// parseTargetAndFindResolver parses the user's dial target and stores the -// parsed target in `cc.parsedTarget`. +// initParsedTargetAndResolverBuilder parses the user's dial target and stores +// the parsed target in `cc.parsedTarget`. // // The resolver to use is determined based on the scheme in the parsed target // and the same is stored in `cc.resolverBuilder`. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) parseTargetAndFindResolver() error { - channelz.Infof(logger, cc.channelz, "original dial target is: %q", cc.target) +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error { + logger.Infof("original dial target is: %q", cc.target) var rb resolver.Builder parsedTarget, err := parseTarget(cc.target) - if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", cc.target, err) - } else { - channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", parsedTarget) + if err == nil { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1707,15 +1695,12 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { defScheme = resolver.GetDefaultScheme() } - channelz.Infof(logger, cc.channelz, "fallback to scheme %q", defScheme) canonicalTarget := defScheme + ":///" + cc.target parsedTarget, err = parseTarget(canonicalTarget) if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", canonicalTarget, err) return err } - channelz.Infof(logger, cc.channelz, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) @@ -1805,7 +1790,7 @@ func encodeAuthority(authority string) string { // credentials do not match the authority configured through the dial option. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) determineAuthority() error { +func (cc *ClientConn) initAuthority() error { dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. 
One was through the transport credentials @@ -1838,6 +1823,5 @@ func (cc *ClientConn) determineAuthority() error { } else { cc.authority = encodeAuthority(endpoint) } - channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) return nil } diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 411e3dfd47..e840858b77 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -21,18 +21,73 @@ package grpc import ( "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" + "google.golang.org/grpc/mem" ) -// baseCodec contains the functionality of both Codec and encoding.Codec, but -// omits the name/string, which vary between the two and are not needed for -// anything besides the registry in the encoding package. +// baseCodec captures the new encoding.CodecV2 interface without the Name +// function, allowing it to be implemented by older Codec and encoding.Codec +// implementations. The omitted Name function is only needed for the register in +// the encoding package and is not part of the core functionality. type baseCodec interface { - Marshal(v any) ([]byte, error) - Unmarshal(data []byte, v any) error + Marshal(v any) (mem.BufferSlice, error) + Unmarshal(data mem.BufferSlice, v any) error +} + +// getCodec returns an encoding.CodecV2 for the codec of the given name (if +// registered). Initially checks the V2 registry with encoding.GetCodecV2 and +// returns the V2 codec if it is registered. Otherwise, it checks the V1 registry +// with encoding.GetCodec and if it is registered wraps it with newCodecV1Bridge +// to turn it into an encoding.CodecV2. Returns nil otherwise. +func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) } -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee1443b..4c805c6446 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity. 
type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 5dafd34edf..4114358545 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -27,9 +27,13 @@ import ( "net/url" "os" + "google.golang.org/grpc/grpclog" credinternal "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/envconfig" ) +var logger = grpclog.Component("credentials") + // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { @@ -112,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon conn.Close() return nil, nil, ctx.Err() } + + // The negotiated protocol can be either of the following: + // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since + // it is the only protocol advertised by the client during the handshake. + // The tls library ensures that the server chooses a protocol advertised + // by the client. + // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement + // for using HTTP/2 over TLS. We can terminate the connection immediately. + np := conn.ConnectionState().NegotiatedProtocol + if np == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } + logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) + } tlsInfo := TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: CommonAuthInfo{ @@ -131,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) conn.Close() return nil, nil, err } + cs := conn.ConnectionState() + // The negotiated application protocol can be empty only if the client doesn't + // support ALPN. In such cases, we can close the connection since ALPN is required + // for using HTTP/2 over TLS. + if cs.NegotiatedProtocol == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } else if logger.V(2) { + logger.Info("Allowing TLS connection from client with ALPN disabled. 
TLS connections with ALPN disabled will be disallowed in future grpc-go releases") + } + } tlsInfo := TLSInfo{ - State: conn.ConnectionState(), + State: cs, CommonAuthInfo: CommonAuthInfo{ SecurityLevel: PrivacyAndIntegrity, }, diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 00273702b6..2b285beee3 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -21,6 +21,7 @@ package grpc import ( "context" "net" + "net/url" "time" "google.golang.org/grpc/backoff" @@ -32,10 +33,16 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) +const ( + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges + defaultMaxCallAttempts = 5 +) + func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { globalDialOptions = append(globalDialOptions, opt...) @@ -43,10 +50,18 @@ func init() { internal.ClearGlobalDialOptions = func() { globalDialOptions = nil } + internal.AddGlobalPerTargetDialOptions = func(opt any) { + if ptdo, ok := opt.(perTargetDialOption); ok { + globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo) + } + } + internal.ClearGlobalPerTargetDialOptions = func() { + globalPerTargetDialOptions = nil + } internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool + internal.WithBufferPool = withBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -78,8 +93,8 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration - recvBufferPool SharedBufferPool defaultScheme string + maxCallAttempts int } // DialOption configures how we set up the connection. @@ -89,6 +104,19 @@ type DialOption interface { var globalDialOptions []DialOption +// perTargetDialOption takes a parsed target and returns a dial option to apply. +// +// This gets called after NewClient() parses the target, and allows per target +// configuration set through a returned DialOption. The DialOption will not take +// effect if specifies a resolver builder, as that Dial Option is factored in +// while parsing target. +type perTargetDialOption interface { + // DialOption returns a Dial Option to apply. + DialOptionForTarget(parsedTarget url.URL) DialOption +} + +var globalPerTargetDialOptions []perTargetDialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // @@ -490,6 +518,8 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. 
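The ALPN checks added to credentials/tls.go above require both peers to negotiate "h2" during the TLS handshake once enforcement is on. A minimal sketch of a hand-rolled crypto/tls listener that stays compatible; the certificate paths and port are placeholders, and gRPC's own server credentials already advertise "h2" on their own.

package main

import (
	"crypto/tls"
	"log"
)

func main() {
	// Placeholder key pair; any proxy or server terminating TLS for gRPC
	// traffic must advertise HTTP/2 via ALPN or the checks above reject it.
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
	if err != nil {
		log.Fatalf("loading key pair: %v", err)
	}
	cfg := &tls.Config{
		Certificates: []tls.Certificate{cert},
		NextProtos:   []string{"h2"}, // gRPC runs over HTTP/2, negotiated via ALPN
	}
	ln, err := tls.Listen("tcp", ":8443", cfg)
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	defer ln.Close()
	log.Printf("serving TLS with ALPN protocols %v", cfg.NextProtos)
}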
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) @@ -649,12 +679,13 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, } } @@ -712,25 +743,25 @@ func WithIdleTimeout(d time.Duration) DialOption { }) } -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. +// WithMaxCallAttempts returns a DialOption that configures the maximum number +// of attempts per call (including retries and hedging) using the channel. +// Service owners may specify a higher value for these parameters, but higher +// values will be treated as equal to the maximum value by the client +// implementation. This mitigates security concerns related to the service +// config being transferred to the client via DNS. // -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) +// A value of 5 will be used if this dial option is not set or n < 2. +func WithMaxCallAttempts(n int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if n < 2 { + n = defaultMaxCallAttempts + } + o.maxCallAttempts = n + }) } -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { +func withBufferPool(bufferPool mem.BufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool + o.copts.BufferPool = bufferPool }) } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 0022859ad7..e7b532b6f8 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./regenerate.sh +//go:generate ./scripts/regenerate.sh /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d714..11d0ae142c 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. 
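WithMaxCallAttempts, added above, caps attempts per call regardless of what the service config requests. A sketch of how the cap combines with a retry policy supplied as the default service config; the service name, target, and backoff values are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient(
		"dns:///example.internal:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Channel-wide cap; values below 2 fall back to the default of 5.
		// The retry policy's maxAttempts of 4 below is then clamped to 3.
		grpc.WithMaxCallAttempts(3),
		grpc.WithDefaultServiceConfig(`{
		  "methodConfig": [{
		    "name": [{"service": "echo.Echo"}],
		    "retryPolicy": {
		      "maxAttempts": 4,
		      "initialBackoff": "0.1s",
		      "maxBackoff": "1s",
		      "backoffMultiplier": 2.0,
		      "retryableStatusCodes": ["UNAVAILABLE"]
		    }
		  }]
		}`),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
	_ = conn // the channel would normally be handed to generated stubs here
}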
func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 0000000000..074c5e234a --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. 
+func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66d5cdf03e..ceec319dd2 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -1,6 +1,6 @@ /* * - * Copyright 2018 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ import ( "fmt" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/protoadapt" ) @@ -32,28 +33,51 @@ import ( const Name = "proto" func init() { - encoding.RegisterCodec(codec{}) + encoding.RegisterCodecV2(&codecV2{}) } -// codec is a Codec implementation with protobuf. It is the default codec for gRPC. -type codec struct{} +// codec is a CodecV2 implementation with protobuf. It is the default codec for +// gRPC. +type codecV2 struct{} -func (codec) Marshal(v any) ([]byte, error) { +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { vv := messageV2Of(v) if vv == nil { - return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } - return proto.Marshal(vv) + size := proto.Size(vv) + if mem.IsBelowBufferPoolingThreshold(size) { + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + } else { + pool := mem.DefaultBufferPool() + buf := pool.Get(size) + if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + pool.Put(buf) + return nil, err + } + data = append(data, mem.NewBuffer(buf, pool)) + } + + return data, nil } -func (codec) Unmarshal(data []byte, v any) error { +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { vv := messageV2Of(v) if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - return proto.Unmarshal(data, vv) + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + defer buf.Free() + // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not + // really possible without a major overhaul of the proto package, but the + // vtprotobuf library may be able to support this. + return proto.Unmarshal(buf.ReadOnlyData(), vv) } func messageV2Of(v any) proto.Message { @@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message { return nil } -func (codec) Name() string { +func (c *codecV2) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go new file mode 100644 index 0000000000..1d827dd5d9 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -0,0 +1,269 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
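The CodecV2 and mem.BufferSlice plumbing above can be exercised by registering a custom codec. A minimal pass-through codec for raw byte payloads, using only the BufferSlice helpers visible in this diff; the package and codec names are hypothetical.

package rawcodec

import (
	"fmt"

	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

// codec is a toy CodecV2 that transports *[]byte payloads unchanged.
type codec struct{}

func (codec) Marshal(v any) (mem.BufferSlice, error) {
	b, ok := v.(*[]byte)
	if !ok {
		return nil, fmt.Errorf("rawcodec: expected *[]byte, got %T", v)
	}
	// mem.SliceBuffer wraps an existing []byte without involving a pool.
	return mem.BufferSlice{mem.SliceBuffer(*b)}, nil
}

func (codec) Unmarshal(data mem.BufferSlice, v any) error {
	b, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawcodec: expected *[]byte, got %T", v)
	}
	// The buffers may be freed once Unmarshal returns, so copy them out.
	*b = data.Materialize()
	return nil
}

func (codec) Name() string { return "raw-bytes" }

func init() {
	// Looked up by content-subtype, e.g. "application/grpc+raw-bytes".
	encoding.RegisterCodecV2(codec{})
}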
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "maps" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +func init() { + internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting +} + +var logger = grpclog.Component("metrics-registry") + +// DefaultMetrics are the default metrics registered through global metrics +// registry. This is written to at initialization time only, and is read only +// after initialization. +var DefaultMetrics = NewMetrics() + +// MetricDescriptor is the data for a registered metric. +type MetricDescriptor struct { + // The name of this metric. This name must be unique across the whole binary + // (including any per call metrics). See + // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions + // for metric naming conventions. + Name Metric + // The description of this metric. + Description string + // The unit (e.g. entries, seconds) of this metric. + Unit string + // The required label keys for this metric. These are intended to + // metrics emitted from a stats handler. + Labels []string + // The optional label keys for this metric. These are intended to attached + // to metrics emitted from a stats handler if configured. + OptionalLabels []string + // Whether this metric is on by default. + Default bool + // The type of metric. This is set by the metric registry, and not intended + // to be set by a component registering a metric. + Type MetricType + // Bounds are the bounds of this metric. This only applies to histogram + // metrics. If unset or set with length 0, stats handlers will fall back to + // default bounds. + Bounds []float64 +} + +// MetricType is the type of metric. +type MetricType int + +// Type of metric supported by this instrument registry. +const ( + MetricTypeIntCount MetricType = iota + MetricTypeFloatCount + MetricTypeIntHisto + MetricTypeFloatHisto + MetricTypeIntGauge +) + +// Int64CountHandle is a typed handle for a int count metric. This handle +// is passed at the recording point in order to know which metric to record +// on. +type Int64CountHandle MetricDescriptor + +// Descriptor returns the int64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 count value on the metrics recorder provided. +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Count(h, incr, labels...) +} + +// Float64CountHandle is a typed handle for a float count metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Float64CountHandle MetricDescriptor + +// Descriptor returns the float64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 count value on the metrics recorder provided. 
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Count(h, incr, labels...) +} + +// Int64HistoHandle is a typed handle for an int histogram metric. This handle +// is passed at the recording point in order to know which metric to record on. +type Int64HistoHandle MetricDescriptor + +// Descriptor returns the int64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Histo(h, incr, labels...) +} + +// Float64HistoHandle is a typed handle for a float histogram metric. This +// handle is passed at the recording point in order to know which metric to +// record on. +type Float64HistoHandle MetricDescriptor + +// Descriptor returns the float64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 histo value on the metrics recorder provided. +func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Histo(h, incr, labels...) +} + +// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Int64GaugeHandle MetricDescriptor + +// Descriptor returns the int64 gauge handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Gauge(h, incr, labels...) +} + +// registeredMetrics are the registered metric descriptor names. +var registeredMetrics = make(map[Metric]bool) + +// metricsRegistry contains all of the registered metrics. +// +// This is written to only at init time, and read only after that. +var metricsRegistry = make(map[Metric]*MetricDescriptor) + +// DescriptorForMetric returns the MetricDescriptor from the global registry. +// +// Returns nil if MetricDescriptor not present. +func DescriptorForMetric(metric Metric) *MetricDescriptor { + return metricsRegistry[metric] +} + +func registerMetric(name Metric, def bool) { + if registeredMetrics[name] { + logger.Fatalf("metric %v already registered", name) + } + registeredMetrics[name] = true + if def { + DefaultMetrics = DefaultMetrics.Add(name) + } +} + +// RegisterInt64Count registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64CountHandle)(descPtr) +} + +// RegisterFloat64Count registers the metric description onto the global +// registry. 
It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64CountHandle)(descPtr) +} + +// RegisterInt64Histo registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64HistoHandle)(descPtr) +} + +// RegisterFloat64Histo registers the metric description onto the global +// registry. It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64HistoHandle)(descPtr) +} + +// RegisterInt64Gauge registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntGauge + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64GaugeHandle)(descPtr) +} + +// snapshotMetricsRegistryForTesting snapshots the global data of the metrics +// registry. Returns a cleanup function that sets the metrics registry to its +// original state. +func snapshotMetricsRegistryForTesting() func() { + oldDefaultMetrics := DefaultMetrics + oldRegisteredMetrics := registeredMetrics + oldMetricsRegistry := metricsRegistry + + registeredMetrics = make(map[Metric]bool) + metricsRegistry = make(map[Metric]*MetricDescriptor) + maps.Copy(registeredMetrics, registeredMetrics) + maps.Copy(metricsRegistry, metricsRegistry) + + return func() { + DefaultMetrics = oldDefaultMetrics + registeredMetrics = oldRegisteredMetrics + metricsRegistry = oldMetricsRegistry + } +} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go new file mode 100644 index 0000000000..3221f7a633 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -0,0 +1,114 @@ +/* + * + * Copyright 2024 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats contains experimental metrics/stats API's. +package stats + +import "maps" + +// MetricsRecorder records on metrics derived from metric registry. +type MetricsRecorder interface { + // RecordInt64Count records the measurement alongside labels on the int + // count associated with the provided handle. + RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string) + // RecordFloat64Count records the measurement alongside labels on the float + // count associated with the provided handle. + RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string) + // RecordInt64Histo records the measurement alongside labels on the int + // histo associated with the provided handle. + RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string) + // RecordFloat64Histo records the measurement alongside labels on the float + // histo associated with the provided handle. + RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string) + // RecordInt64Gauge records the measurement alongside labels on the int + // gauge associated with the provided handle. + RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) +} + +// Metric is an identifier for a metric. +type Metric string + +// Metrics is a set of metrics to record. Once created, Metrics is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetrics instead. +type Metrics struct { + // metrics are the set of metrics to initialize. + metrics map[Metric]bool +} + +// NewMetrics returns a Metrics containing Metrics. +func NewMetrics(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *Metrics) Metrics() map[Metric]bool { + return m.metrics +} + +// Add adds the metrics to the metrics set and returns a new copy with the +// additional metrics. +func (m *Metrics) Add(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *Metrics) Join(metrics *Metrics) *Metrics { + newMetrics := make(map[Metric]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &Metrics{ + metrics: newMetrics, + } +} + +// Remove removes the metrics from the metrics set and returns a new copy with +// the metrics removed. 
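The registry and typed handles above are intended to be used as: register once at init time, then record through whatever MetricsRecorder the component is handed (for LB policies, the new balancer.BuildOptions.MetricsRecorder field earlier in this diff). A sketch with a hypothetical metric name and label:

package examplelb

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// pickCount is registered once at package init time; the returned handle is
// what recording sites use. The metric name and label key are hypothetical.
var pickCount = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "example.lb.picks",
	Description: "Number of picks performed by the example LB policy.",
	Unit:        "pick",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// recordPick adds one pick for the given target to whichever recorder the
// component was handed.
func recordPick(recorder estats.MetricsRecorder, target string) {
	pickCount.Record(recorder, 1, target)
}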
+func (m *Metrics) Remove(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + delete(newMetrics, metric) + } + return &Metrics{ + metrics: newMetrics, + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index ac73c9ced2..f1ae080dcb 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -20,8 +20,6 @@ package grpclog import ( "fmt" - - "google.golang.org/grpc/internal/grpclog" ) // componentData records the settings for a component. @@ -33,22 +31,22 @@ var cache = map[string]*componentData{} func (c *componentData) InfoDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.InfoDepth(depth+1, args...) + InfoDepth(depth+1, args...) } func (c *componentData) WarningDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.WarningDepth(depth+1, args...) + WarningDepth(depth+1, args...) } func (c *componentData) ErrorDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.ErrorDepth(depth+1, args...) + ErrorDepth(depth+1, args...) } func (c *componentData) FatalDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.FatalDepth(depth+1, args...) + FatalDepth(depth+1, args...) } func (c *componentData) Info(args ...any) { diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 16928c9cb9..db320105e6 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,18 +18,15 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog import ( "os" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) func init() { @@ -38,58 +35,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return grpclog.Logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. func Info(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - grpclog.Logger.Warning(args...) + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. 
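As a point of reference, a minimal sketch of how the metric registry and the Metrics set added above might be consumed from instrumentation code; the metric names, the Default flag value, and the record helper are illustrative assumptions, not part of the vendored change.

package example

import estats "google.golang.org/grpc/experimental/stats"

// Registered at init time, as the registry NOTE above requires.
// "example.queue_depth" is a hypothetical metric name.
var exampleGauge = estats.RegisterInt64Gauge(estats.MetricDescriptor{
	Name:    "example.queue_depth",
	Default: false, // assumed to mean "not enabled by default"
})

// enabledMetrics builds an immutable metric set via the copy-on-write helpers.
func enabledMetrics() *estats.Metrics {
	m := estats.NewMetrics("example.queue_depth")
	m = m.Add("example.extra_metric") // returns a new copy; m is never mutated in place
	return m.Remove("example.extra_metric")
}

// record shows a handle being consumed through the MetricsRecorder interface.
func record(mr estats.MetricsRecorder) {
	mr.RecordInt64Gauge(exampleGauge, 3)
}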
func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) 
+ } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 0000000000..59c03bc14c --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. +package internal + +// LoggerV2Impl is the logger used for the non-depth log functions. +var LoggerV2Impl LoggerV2 + +// DepthLoggerV2Impl is the logger used for the depth log functions. +var DepthLoggerV2Impl DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go new file mode 100644 index 0000000000..e524fdd40b --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} + +// LoggerWrapper wraps Logger into a LoggerV2. +type LoggerWrapper struct { + Logger +} + +// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Info(args ...any) { + l.Logger.Print(args...) +} + +// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Infoln(args ...any) { + l.Logger.Println(args...) +} + +// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Infof(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. 
+func (l *LoggerWrapper) Warning(args ...any) { + l.Logger.Print(args...) +} + +// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Warningln(args ...any) { + l.Logger.Println(args...) +} + +// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Warningf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Error(args ...any) { + l.Logger.Print(args...) +} + +// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Errorln(args ...any) { + l.Logger.Println(args...) +} + +// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Errorf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (*LoggerWrapper) V(int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go similarity index 52% rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index bfc45102ab..07df71e98a 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,59 +16,17 @@ * */ -// Package grpclog (internal) defines depth logging for grpc. -package grpclog +package internal import ( + "encoding/json" + "fmt" + "io" + "log" "os" ) -// Logger is the logger used for the non-depth log functions. -var Logger LoggerV2 - -// DepthLogger is the logger used for the depth log functions. -var DepthLogger DepthLoggerV2 - -// InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.InfoDepth(depth, args...) - } else { - Logger.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.WarningDepth(depth, args...) - } else { - Logger.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.ErrorDepth(depth, args...) - } else { - Logger.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.FatalDepth(depth, args...) - } else { - Logger.Fatalln(args...) - } - os.Exit(1) -} - // LoggerV2 does underlying logging work for grpclog. -// This is a copy of the LoggerV2 defined in the external grpclog package. It -// is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. Info(args ...any) @@ -107,14 +65,13 @@ type LoggerV2 interface { // DepthLoggerV2 logs at a specified call frame. 
If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. // // # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. @@ -124,3 +81,124 @@ type DepthLoggerV2 interface { // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...any) } + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. 
+// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d8267..4b20358570 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d713..892dc13d16 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. 
Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. 
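For orientation, a minimal sketch of configuring the refactored grpclog (which now delegates to grpclog/internal) from application code; the writer choices here are arbitrary.

package main

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Suppress INFO and WARNING, keep ERROR and FATAL on stderr.
	// SetLoggerV2 is not mutex-protected, so install the logger before any
	// other gRPC call (for example from an init function, as here).
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, os.Stderr))
}

When no logger is installed, the default logger is still built from the GRPC_GO_LOG_SEVERITY_LEVEL and GRPC_GO_LOG_VERBOSITY_LEVEL environment variables, as described in the package comment above.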
- m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. - b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 6a93475a7f..d92335445f 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_health_v1_health_proto_goTypes = []interface{}{ +var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse @@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckRequest); i { case 0: return &v.state @@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() { return nil } } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckResponse); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 8f793e6e89..f96b8ab492 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -43,6 +43,10 @@ const ( // HealthClient is the client API for Health service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. type HealthClient interface { // Check gets the health of the specified service. If the requested service // is unknown, the call will fail with status NOT_FOUND. 
If the caller does @@ -69,7 +73,7 @@ type HealthClient interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) } type healthClient struct { @@ -90,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &healthWatchClient{ClientStream: stream} + x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -106,26 +110,16 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts . return x, nil } -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] // HealthServer is the server API for Health service. // All implementations should embed UnimplementedHealthServer -// for forward compatibility +// for forward compatibility. +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. type HealthServer interface { // Check gets the health of the specified service. If the requested service // is unknown, the call will fail with status NOT_FOUND. If the caller does @@ -152,19 +146,23 @@ type HealthServer interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error + Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error } -// UnimplementedHealthServer should be embedded to have forward compatible implementations. -type UnimplementedHealthServer struct { -} +// UnimplementedHealthServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } -func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } +func (UnimplementedHealthServer) testEmbeddedByValue() {} // UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HealthServer will @@ -174,6 +172,13 @@ type UnsafeHealthServer interface { } func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + // If the following call panics, it indicates UnimplementedHealthServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Health_ServiceDesc, srv) } @@ -200,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream -} - -type healthWatchServer struct { - grpc.ServerStream + return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) } -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] // Health_ServiceDesc is the grpc.ServiceDesc for Health service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index cce6312d77..d4b4b70815 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -51,7 +51,7 @@ func NewServer() *Server { } // Check implements `service Health`. 
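To illustrate how the regenerated stubs are consumed, a minimal sketch of registering the standard health service; the listen address and service name are placeholders.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051") // placeholder address
	if err != nil {
		log.Fatal(err)
	}

	s := grpc.NewServer()
	hs := health.NewServer() // implements the Check and Watch methods of HealthServer
	healthpb.RegisterHealthServer(s, hs)
	hs.SetServingStatus("example.Service", healthpb.HealthCheckResponse_SERVING)

	log.Fatal(s.Serve(lis))
}

Existing code that refers to Health_WatchClient or Health_WatchServer by name keeps compiling through the new type aliases to grpc.ServerStreamingClient and grpc.ServerStreamingServer.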
-func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { +func (s *Server) Check(_ context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { s.mu.RLock() defer s.mu.RUnlock() if servingStatus, ok := s.statusMap[in.Service]; ok { diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index fed1c011a3..b15cf482d2 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,10 +25,10 @@ package backoff import ( "context" "errors" + "math/rand" "time" grpcbackoff "google.golang.org/grpc/backoff" - "google.golang.org/grpc/internal/grpcrand" ) // Strategy defines the methodology for backing off after a grpc connection @@ -67,7 +67,7 @@ func (bc Exponential) Backoff(retries int) time.Duration { } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. - backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a871..9669328914 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index dfe18b0892..64c791953d 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock. @@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e1507..078bb81238 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. 
EntriesPerPage = 50 curState int32 diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index d1ed8df6a5..0e6e18e185 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -35,13 +35,13 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { +func (s *SocketOptionData) Getsockopt(uintptr) { once.Do(func() { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { +func GetSocketOption(any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 9c915d9e4b..452985f8d8 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -40,6 +40,16 @@ var ( // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled + // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this + // option is present for backward compatibility. This option may be overridden + // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" + // or "false". + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) + // XDSFallbackSupport is the env variable that controls whether support for + // xDS fallback is turned on. If this is unset or is false, only the first + // xDS server in the list of server configs will be used. + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go index 7f7044e173..7617be2158 100644 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -18,11 +18,11 @@ package internal var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial + // WithBufferPool is implemented by the grpc package and returns a dial // option to configure a shared buffer pool for a grpc.ClientConn. - WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - // RecvBufferPool is implemented by the grpc package and returns a server + // BufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. 
- RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go index faa998de76..092ad187a2 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -16,17 +16,21 @@ * */ +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" + + "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { - logger DepthLoggerV2 + logger grpclog.DepthLoggerV2 prefix string } @@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - InfoDepth(1, fmt.Sprintf(format, args...)) + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. @@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - WarningDepth(1, fmt.Sprintf(format, args...)) + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. @@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. - format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) } // V reports whether verbosity level l is at least the requested verbose level. func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) + if pl != nil { + return pl.logger.V(l) + } + return true } // NewPrefixLogger creates a prefix logger with the given prefix. 
-func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { +func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger { return &PrefixLogger{logger: logger, prefix: prefix} } diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go deleted file mode 100644 index 0126d6b510..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build !go1.21 - -// TODO: when this file is deleted (after Go 1.20 support is dropped), delete -// all of grpcrand and call the rand package directly. - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - mu sync.Mutex -) - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - mu.Lock() - defer mu.Unlock() - return r.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - mu.Lock() - defer mu.Unlock() - return r.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - mu.Lock() - defer mu.Unlock() - return r.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - mu.Lock() - defer mu.Unlock() - return r.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - mu.Lock() - defer mu.Unlock() - return r.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - mu.Lock() - defer mu.Unlock() - return r.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - mu.Lock() - defer mu.Unlock() - return r.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - mu.Lock() - defer mu.Unlock() - return r.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - mu.Lock() - defer mu.Unlock() - r.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go deleted file mode 100644 index c37299af1e..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build go1.21 - -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import "math/rand" - -// This implementation will be used for Go version 1.21 or newer. -// For older versions, the original implementation with mutex will be used. - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - return rand.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - return rand.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - return rand.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - return rand.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - return rand.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - return rand.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - return rand.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - return rand.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - rand.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index f7f40a16ac..19b9d63927 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// Schedule adds a callback to be scheduled after existing callbacks are run. +// TrySchedule tries to schedules the provided callback function f to be +// executed in the order it was added. This is a best-effort operation. If the +// context passed to NewCallbackSerializer was canceled before this method is +// called, the callback will not be scheduled. // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) { + cs.callbacks.Put(f) +} + +// ScheduleOr schedules the provided callback function f to be executed in the +// order it was added. If the context passed to NewCallbackSerializer has been +// canceled before this method is called, the onFailure callback will be +// executed inline instead. // -// Return value indicates if the callback was successfully added to the list of -// callbacks to be executed by the serializer. It is not possible to add -// callbacks once the context passed to NewCallbackSerializer is cancelled. 
-func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - return cs.callbacks.Put(f) == nil +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) { + if cs.callbacks.Put(f) != nil { + onFailure() + } } func (cs *CallbackSerializer) run(ctx context.Context) { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index aef8cec1ab..6d8c2f518d 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { if ps.msg != nil { msg := ps.msg - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[sub] { @@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) { ps.msg = msg for sub := range ps.subscribers { s := sub - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[s] { diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 48d24bdb4e..7aae9240ff 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -106,6 +106,14 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. ClearGlobalDialOptions func() + + // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be + // configured for newly created ClientConns. + AddGlobalPerTargetDialOptions any // func (opt any) + // ClearGlobalPerTargetDialOptions clears the slice of global late apply + // dial options. + ClearGlobalPerTargetDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a // single dial option. JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption @@ -126,7 +134,8 @@ var ( // deleted or changed. BinaryLogger any // func(binarylog.Logger) grpc.ServerOption - // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a + // provided grpc.ClientConn. SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using @@ -174,7 +183,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -184,25 +193,45 @@ var ( ChannelzTurnOffForTesting func() - // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found - // error for a given resource type and name. This is usually triggered when - // the associated watch timer fires. For testing purposes, having this - // function makes events more predictable than relying on timer events. 
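A brief sketch of the new scheduling semantics above; grpcsync is internal to the gRPC module, so this only compiles from within it, and doWork and cleanup are hypothetical helpers.

package example

import (
	"context"

	"google.golang.org/grpc/internal/grpcsync"
)

func demo(doWork func(context.Context), cleanup func()) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cs := grpcsync.NewCallbackSerializer(ctx)

	// Best effort: if ctx was already canceled, the callback is silently dropped.
	cs.TrySchedule(doWork)

	// Explicit failure path: onFailure runs inline if the serializer can no
	// longer accept callbacks.
	cs.ScheduleOr(doWork, cleanup)
}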
- TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error - - // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client - // singleton to invoke resource not found for a resource type name and - // resource name. - TriggerXDSResourceNameNotFoundClient any // func(string, string) error + // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to + // invoke resource-not-found error for the given resource type and name. + TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error - // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + // FromOutgoingContextRaw returns the un-merged, intermediary contents of + // metadata.rawMD. FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) - // UserSetDefaultScheme is set to true if the user has overridden the default resolver scheme. - UserSetDefaultScheme bool = false + // UserSetDefaultScheme is set to true if the user has overridden the + // default resolver scheme. + UserSetDefaultScheme = false + + // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n + // is the number of elements. swap swaps the elements with indexes i and j. + ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. + ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. + SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. + SetBufferPoolingThresholdForTesting any // func(int) ) -// HealthChecker defines the signature of the client-side LB channel health checking function. +// HealthChecker defines the signature of the client-side LB channel health +// checking function. 
// // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index f3f52a59a8..4552db16b0 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,6 +24,7 @@ import ( "context" "encoding/json" "fmt" + "math/rand" "net" "os" "strconv" @@ -35,7 +36,6 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -63,6 +63,8 @@ var ( func init() { resolver.Register(NewBuilder()) internal.TimeAfterFunc = time.After + internal.TimeNowFunc = time.Now + internal.TimeUntilFunc = time.Until internal.NewNetResolver = newNetResolver internal.AddressDialer = addressDialer } @@ -209,12 +211,12 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var waitTime time.Duration + var nextResolutionTime time.Time if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - waitTime = MinResolutionInterval + nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval) select { case <-d.ctx.Done(): return @@ -223,13 +225,13 @@ func (d *dnsResolver) watcher() { } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. - waitTime = backoff.DefaultExponential.Backoff(backoffIndex) + nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } select { case <-d.ctx.Done(): return - case <-internal.TimeAfterFunc(waitTime): + case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)): } } } @@ -423,7 +425,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return grpcrand.Intn(100)+1 <= *a + return rand.Intn(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go index a7ecaf8d52..c0eae4f5f8 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -51,11 +51,22 @@ var ( // The following vars are overridden from tests. var ( // TimeAfterFunc is used by the DNS resolver to wait for the given duration - // to elapse. In non-test code, this is implemented by time.After. In test + // to elapse. In non-test code, this is implemented by time.After. In test // code, this can be used to control the amount of time the resolver is // blocked waiting for the duration to elapse. TimeAfterFunc func(time.Duration) <-chan time.Time + // TimeNowFunc is used by the DNS resolver to get the current time. + // In non-test code, this is implemented by time.Now. In test code, + // this can be used to control the current time for the resolver. + TimeNowFunc func() time.Time + + // TimeUntilFunc is used by the DNS resolver to calculate the remaining + // wait time for re-resolution. 
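The DNS resolver hunk above stops sleeping for a relative duration and instead records an absolute next-resolution time, sleeping via the injectable TimeNowFunc/TimeUntilFunc/TimeAfterFunc hooks so tests can drive the clock. A small standalone sketch of the same pattern, with illustrative names:

package main

import (
	"fmt"
	"time"
)

// Injection points that default to the real clock; tests can swap them out,
// much like the resolver's internal package does.
var (
	timeNow   = time.Now
	timeUntil = time.Until
	timeAfter = time.After
)

// waitUntil sleeps until the absolute deadline next, or returns early when
// done is closed (the resolver's ctx.Done() case).
func waitUntil(next time.Time, done <-chan struct{}) bool {
	select {
	case <-done:
		return false
	case <-timeAfter(timeUntil(next)):
		return true
	}
}

func main() {
	// On success the watcher waits a minimum interval before re-resolving;
	// on failure it would add an exponential backoff instead.
	minResolutionInterval := 30 * time.Millisecond // shortened for the demo
	next := timeNow().Add(minResolutionInterval)
	if waitUntil(next, make(chan struct{})) {
		fmt.Println("deadline reached; re-resolving")
	}
}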
In non-test code, this is implemented by + // time.Until. In test code, this can be used to control the remaining + // time for resolver to wait for re-resolution. + TimeUntilFunc func(time.Time) time.Duration + // NewNetResolver returns the net.Resolver instance for the given target. NewNetResolver func(string) (NetResolver, error) diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index afac56572a..b901c7bace 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -55,7 +55,7 @@ func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go new file mode 100644 index 0000000000..fd33af51ae --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/labels.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats provides internal stats related functionality. +package stats + +import "context" + +// Labels are the labels for metrics. +type Labels struct { + // TelemetryLabels are the telemetry labels to record. + TelemetryLabels map[string]string +} + +type labelsKey struct{} + +// GetLabels returns the Labels stored in the context, or nil if there is one. +func GetLabels(ctx context.Context) *Labels { + labels, _ := ctx.Value(labelsKey{}).(*Labels) + return labels +} + +// SetLabels sets the Labels in the context. +func SetLabels(ctx context.Context, labels *Labels) context.Context { + // could also append + return context.WithValue(ctx, labelsKey{}, labels) +} diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go new file mode 100644 index 0000000000..be110d41f9 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -0,0 +1,95 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package stats + +import ( + "fmt" + + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/stats" +) + +// MetricsRecorderList forwards Record calls to all of its metricsRecorders. +// +// It eats any record calls where the label values provided do not match the +// number of label keys. +type MetricsRecorderList struct { + // metricsRecorders are the metrics recorders this list will forward to. + metricsRecorders []estats.MetricsRecorder +} + +// NewMetricsRecorderList creates a new metric recorder list with all the stats +// handlers provided which implement the MetricsRecorder interface. +// If no stats handlers provided implement the MetricsRecorder interface, +// the MetricsRecorder list returned is a no-op. +func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList { + var mrs []estats.MetricsRecorder + for _, sh := range shs { + if mr, ok := sh.(estats.MetricsRecorder); ok { + mrs = append(mrs, mr) + } + } + return &MetricsRecorderList{ + metricsRecorders: mrs, + } +} + +func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { + if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want { + panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want)) + } +} + +func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc82059..757925381f 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,11 +138,11 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. 
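The new MetricsRecorderList above keeps only the stats.Handlers that also implement the experimental MetricsRecorder interface and fans every Record call out to them after checking the label count. A hedged standalone sketch of that fan-out shape, using an illustrative interface and metric name rather than the real estats types:

package main

import "fmt"

// recorder is a stand-in for the subset of handlers the list forwards to.
type recorder interface {
	RecordInt64Count(metric string, incr int64, labels ...string)
}

type printRecorder struct{ name string }

func (p printRecorder) RecordInt64Count(metric string, incr int64, labels ...string) {
	fmt.Printf("[%s] %s += %d, labels=%v\n", p.name, metric, incr, labels)
}

// recorderList fans one Record call out to every recorder, panicking (like
// verifyLabels above) when the label count does not match expectations.
type recorderList struct {
	recorders  []recorder
	wantLabels map[string]int // metric name -> required label count
}

func (l *recorderList) RecordInt64Count(metric string, incr int64, labels ...string) {
	if want, ok := l.wantLabels[metric]; ok && want != len(labels) {
		panic(fmt.Sprintf("metric %q: got %d labels, want %d", metric, len(labels), want))
	}
	for _, r := range l.recorders {
		r.RecordInt64Count(metric, incr, labels...)
	}
}

func main() {
	l := &recorderList{
		recorders:  []recorder{printRecorder{"otel"}, printRecorder{"custom"}},
		wantLabels: map[string]int{"example.calls.started": 2},
	}
	l.RecordInt64Count("example.calls.started", 1, "target", "method")
}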
p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 999f52cd75..54c24c2ff3 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -58,20 +58,20 @@ func GetRusage() *Rusage { // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It a no-op function for non-linux environments. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) { log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux environments. -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux environments. // A negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 078137b7fd..7e7aaa5463 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index fd7d43a890..d5c1085eea 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. 
Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 3deadfb4a2..ef72fbb3a0 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -148,9 +149,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. onEachWrite func() } @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -308,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. 
func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { - var wakeUp bool c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing } if f != nil { if !f() { // f wasn't successful - c.mu.Unlock() return false, nil } } + if it == nil { + return true, nil + } + + var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - c.trfChan.Store(make(chan struct{})) + ch := make(chan struct{}) + c.trfChan.Store(&ch) } } - c.mu.Unlock() if wakeUp { select { - case c.ch <- struct{}{}: + case c.wakeupCh <- struct{}{}: default: } } return true, nil } -// Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - if c.err != nil { + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(chan struct{}) - close(ch) - c.trfChan.Store((chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil + return frame, err } c.consumerWaiting = true c.mu.Unlock() + + // Release the lock above and wait to be woken up. select { - case <-c.ch: + case <-c.wakeupCh: case <-c.done: return nil, errors.New("transport closed by client") } } } +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. 
+func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return false, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) + } + c.transportResponseFrames-- + } + return h, nil +} + +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } + // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. - ch, _ := c.trfChan.Load().(chan struct{}) + ch := c.trfChan.Swap(nil) if ch != nil { - close(ch) + close(*ch) } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() } type side int @@ -466,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { @@ -490,12 +511,13 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. 
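The control-buffer rewrite above replaces the err field with a closed flag and stores the throttling channel in an atomic.Pointer, so throttle() can check it without the mutex while get()/finish() swap it out and close it. A compact standalone sketch of that gate pattern (single producer and consumer for brevity; the real code also holds a mutex around the queue):

package main

import (
	"fmt"
	"sync/atomic"
)

const maxQueued = 2 // demo threshold (the transport uses 50)

type queue struct {
	pending int
	gate    atomic.Pointer[chan struct{}] // non-nil while writers must wait
}

// put enqueues one peer-response frame and publishes the gate once the
// threshold is reached.
func (q *queue) put() {
	q.pending++
	if q.pending == maxQueued {
		ch := make(chan struct{})
		q.gate.Store(&ch)
	}
}

// get dequeues one frame; dropping back under the threshold closes and clears
// the gate, releasing throttled writers.
func (q *queue) get() {
	if q.pending == maxQueued {
		if ch := q.gate.Swap(nil); ch != nil {
			close(*ch)
		}
	}
	q.pending--
}

// throttle blocks while the gate is published, or until done closes.
func (q *queue) throttle(done <-chan struct{}) {
	if ch := q.gate.Load(); ch != nil {
		select {
		case <-*ch:
		case <-done:
		}
	}
}

func main() {
	q := &queue{}
	q.put()
	q.put() // threshold reached; gate published
	go q.get()
	q.throttle(make(chan struct{}))
	fmt.Println("writer unblocked after the queue drained")
}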
delete(l.estdStreams, c.streamID) str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { @@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possible HTTP2 frame size. + // Every dataFrame has two buffers; h that keeps grpc-message header and data + // that is the actual message. As an optimization to keep wire traffic low, data + // from data is copied to h to make as big as the maximum possible HTTP2 frame + // size. - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - var ( - buf []byte - ) + // Figure out the maximum size we can send maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. @@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, len(dataItem.d)) - if hSize != 0 { - if dSize == 0 { - buf = dataItem.h - } else { - // We can add some data to grpc message header to distribute bytes more equally across frames. - // Copy on the stack to avoid generating garbage - var localBuf [http2MaxFrameLen]byte - copy(localBuf[:hSize], dataItem.h) - copy(localBuf[hSize:], dataItem.d[:dSize]) - buf = localBuf[:hSize+dSize] - } + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h } else { - buf = dataItem.d - } + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. 
+ pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) - size := hSize + dSize + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + if dataItem.endStream && remainingBytes == 0 { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. + _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { @@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 4a3ddce29a..ce878693bd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -171,6 +172,8 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. + data.Ref() headersWritten := s.updateHeaderSent() - return ht.do(func() { + err := ht.do(func() { + defer data.Free() if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { @@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
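serverHandlerTransport.Write above now takes a mem.BufferSlice and calls data.Ref() before handing the write to another goroutine, releasing the reference both inside the deferred callback and on the error path so the buffers stay valid until actually written. A standalone sketch of that reference-counting hand-off, using a toy refBuffer type in place of mem.BufferSlice:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// refBuffer is a toy stand-in for a reference-counted buffer. Free releases
// the data only when the last holder is done with it.
type refBuffer struct {
	refs atomic.Int32
	data []byte
}

func newRefBuffer(data []byte) *refBuffer {
	b := &refBuffer{data: data}
	b.refs.Store(1)
	return b
}

func (b *refBuffer) Ref() { b.refs.Add(1) }

func (b *refBuffer) Free() {
	if b.refs.Add(-1) == 0 {
		b.data = nil
		fmt.Println("buffer released")
	}
}

// write hands the buffer to an asynchronous writer. It takes a reference up
// front so the data outlives the call, and releases it on the error path too.
func write(b *refBuffer, enqueue func(func()) error) error {
	b.Ref()
	if err := enqueue(func() {
		defer b.Free()
		fmt.Printf("writing %q\n", b.data)
	}); err != nil {
		b.Free()
		return err
	}
	return nil
}

func main() {
	var wg sync.WaitGroup
	buf := newRefBuffer([]byte("hello"))
	_ = write(buf, func(f func()) error {
		wg.Add(1)
		go func() { defer wg.Done(); f() }()
		return nil
	})
	buf.Free() // the caller drops its own reference
	wg.Wait()
}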
} s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() @@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3c63c70698..c769deab53 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,6 +47,7 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -59,6 +60,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var goAwayLoopyWriterTimeout = 5 * time.Second + var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -144,7 +147,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. 
Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -346,7 +349,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -463,7 +466,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -504,7 +507,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -770,7 +772,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -983,6 +985,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1006,10 +1009,20 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - <-t.writerDone + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() t.conn.Close() channelz.RemoveEntry(t.channelz.ID) @@ -1065,27 +1078,36 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state. 
if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - d: data, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -1190,10 +1212,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that @@ -1222,7 +1247,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1307,7 +1332,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1642,11 +1667,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1671,13 +1695,6 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} @@ -1745,7 +1762,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). 
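The client Close path earlier in this hunk now sets a write deadline and waits for the loopy writer to flush the GOAWAY only up to goAwayLoopyWriterTimeout, so a blackholed TCP connection cannot stall shutdown indefinitely. A minimal sketch of that bounded wait, with illustrative names:

package main

import (
	"fmt"
	"time"
)

// waitOrTimeout waits for done to be closed, but gives up after d -- the
// shape of the bounded wait Close now performs around its GOAWAY write.
func waitOrTimeout(done <-chan struct{}, d time.Duration) bool {
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-done:
		return true
	case <-timer.C:
		return false
	}
}

func main() {
	writerDone := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond) // simulate the writer flushing the GOAWAY
		close(writerDone)
	}()
	if waitOrTimeout(writerDone, time.Second) {
		fmt.Println("GOAWAY written; closing the connection")
	} else {
		fmt.Println("timed out waiting for the GOAWAY; closing anyway")
	}
}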
- sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index cab0e2d3d4..584b50fe55 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,6 +25,7 @@ import ( "fmt" "io" "math" + "math/rand" "net" "net/http" "strconv" @@ -38,12 +39,12 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -119,7 +120,7 @@ type http2Server struct { // Fields below are for channelz metric collection. channelz *channelz.Socket - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 @@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: newBufferPool(), + bufferPool: config.BufferPool, } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. 
+ pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } if f.StreamEnded() { @@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { + _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { + _ = reader.Close() return t.streamContextErr(s) } } + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } // keepalive running in a separate goroutine does the following: @@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: @@ -1440,7 +1455,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := grpcrand.Int63n(2*r) - r + j := rand.Int63n(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 39cef3bd44..3613d7b648 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. 
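Both the server's handleData above and loopy's processData now move bytes through a mem.BufferPool instead of allocating a fresh slice per frame. A hedged sketch of that Get/Put pattern built on sync.Pool (the real pool is tiered by size, but it hands out *[]byte just like this one):

package main

import (
	"fmt"
	"sync"
)

// bufferPool is a minimal pool in the spirit of mem.BufferPool: Get hands out
// a *[]byte of the requested length, Put recycles it.
type bufferPool struct{ p sync.Pool }

func (bp *bufferPool) Get(length int) *[]byte {
	if v, ok := bp.p.Get().(*[]byte); ok && cap(*v) >= length {
		*v = (*v)[:length]
		return v
	}
	b := make([]byte, length)
	return &b
}

func (bp *bufferPool) Put(b *[]byte) { bp.p.Put(b) }

// frame copies a message header and payload into one pooled buffer so it can
// go out as a single write, the way processData materializes its frame.
func frame(pool *bufferPool, hdr, payload []byte) {
	size := len(hdr) + len(payload)
	buf := pool.Get(size)
	defer pool.Put(buf)
	copy((*buf)[:len(hdr)], hdr)
	copy((*buf)[len(hdr):], payload)
	fmt.Printf("would write %d bytes: %q\n", size, (*buf)[:size])
}

func main() {
	pool := &bufferPool{}
	frame(pool, []byte{0, 0, 0, 0, 3}, []byte("abc"))
}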
- n, err = w.conn.Write(b) + n, err := w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { @@ -389,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa103257..54b2244365 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 4b39c0ade9..924ba4f365 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,7 +22,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -47,32 +47,10 @@ import ( const logLevel = 2 -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} - // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer *bytes.Buffer + buffer mem.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { + // drop the buffer on the floor. 
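The proxy change above returns the raw connection after the CONNECT handshake unless the bufio.Reader actually has bytes buffered, avoiding the extra read indirection in the common case. A standalone sketch of that decision (the handshake parsing is elided; names are illustrative):

package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
)

// bufConn drains bytes the bufio.Reader already consumed from the underlying
// connection before further reads would hit the connection itself.
type bufConn struct {
	net.Conn
	r io.Reader
}

func (c *bufConn) Read(b []byte) (int, error) { return c.r.Read(b) }

// wrapAfterHandshake mirrors the change: only wrap when the reader buffered
// bytes past the CONNECT response; otherwise hand back the raw connection.
func wrapAfterHandshake(conn net.Conn, r *bufio.Reader) net.Conn {
	if r.Buffered() != 0 {
		return &bufConn{Conn: conn, r: r}
	}
	return conn
}

func main() {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()

	go server.Write([]byte("HTTP/1.1 200 OK\r\n\r\npayload"))

	r := bufio.NewReader(client)
	// Pretend we parsed the CONNECT response status line and the blank line
	// that terminates the headers; extra payload may share the same segment.
	if _, err := r.ReadString('\n'); err != nil {
		panic(err)
	}
	if _, err := r.ReadString('\n'); err != nil {
		panic(err)
	}
	c := wrapAfterHandshake(client, r)
	buf := make([]byte, 7)
	if _, err := io.ReadFull(c, buf); err != nil {
		panic(err)
	}
	fmt.Printf("bytes buffered past the handshake: %q\n", buf)
}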
Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. @@ -148,45 +129,97 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last *bytes.Buffer // Stores the remaining data in the previous calls. + last mem.Buffer // Stores the remaining data in the previous calls. err error - freeBuffer func(*bytes.Buffer) } -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. -func (r *recvBufferReader) Read(p []byte) (n int, err error) { +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { - // Read remaining data left in last call. - copied, _ := r.last.Read(p) - if r.last.Len() == 0 { - r.freeBuffer(r.last) + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readHeaderClient(header) + } else { + n, r.err = r.readHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. If last is drained, it tries to read +// additional data from recv. It blocks if there no additional data available in +// recv. If Read returns any non-nil error, it will continue to return that +// error. +func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { + if r.err != nil { + return nil, r.err + } + if r.last != nil { + buf = r.last + if r.last.Len() > n { + buf, r.last = mem.SplitUnsafe(buf, n) + } else { r.last = nil } - return copied, nil + return buf, nil } if r.closeStream != nil { - n, r.err = r.readClient(p) + buf, r.err = r.readClient(n) } else { - n, r.err = r.read(p) + buf, r.err = r.read(n) } - return n, r.err + return buf, r.err } -func (r *recvBufferReader) read(p []byte) (n int, err error) { +func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) + } +} + +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { + select { + case <-r.ctxDone: + return nil, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. 
What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readHeaderAdditional(m, header) + case m := <-r.recv.get(): + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readAdditional(m, n) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readAdditional(m, n) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) } - return copied, nil + + return m.buffer, nil } type streamState uint32 @@ -241,7 +289,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream + ct ClientTransport // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -251,7 +299,7 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota @@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read @@ -499,36 +547,87 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream. 
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return nil, er } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) + } + return data, nil } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err - return + return 0, err } t.windowHandler(n) - return + return n, nil +} + +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err + } + t.windowHandler(buf.Len()) + return buf, nil } // BytesReceived indicates whether any bytes have been received on this stream. @@ -574,6 +673,7 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -612,6 +712,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -673,7 +775,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -725,7 +827,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -798,7 +900,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. 
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to indicate application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7d..eb42b19fb9 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. + PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 0000000000..c37c58c023 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. 
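An illustrative sketch, not part of the vendored change: how a caller borrows and returns memory through this interface, using the NewTieredBufferPool constructor added later in this file. The pool sizes and printed values are arbitrary.

    package main

    import (
        "fmt"

        "google.golang.org/grpc/mem"
    )

    func main() {
        // A pool with two size classes; requests larger than the biggest
        // class fall back to an internal simple sync.Pool-backed pool.
        pool := mem.NewTieredBufferPool(4<<10, 32<<10)

        buf := pool.Get(16 << 10) // borrowed *[]byte with len == 16 KiB
        copy(*buf, "payload")
        fmt.Println(len(*buf), cap(*buf) >= 16<<10) // 16384 true

        pool.Put(buf) // return it so later Get calls can reuse the backing array
    }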
+ Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. 
When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 0000000000..228e9c2f20 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. This is why all methods have +// value receivers rather than pointer receivers. +// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. 
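A minimal usage sketch, not part of the diff, assuming the Copy and Materialize helpers introduced elsewhere in this patch. It shows the difference between the built-in len (number of buffers) and BufferSlice.Len (total bytes), and that Free releases every reference in the slice.

    package main

    import (
        "fmt"

        "google.golang.org/grpc/mem"
    )

    func main() {
        pool := mem.DefaultBufferPool()

        // Two reference-counted buffers holding copies of the input bytes.
        s := mem.BufferSlice{
            mem.Copy([]byte("hello, "), pool),
            mem.Copy([]byte("world"), pool),
        }

        fmt.Println(len(s))                 // 2: buffers in the slice
        fmt.Println(s.Len())                // 12: total bytes across all buffers
        fmt.Printf("%s\n", s.Materialize()) // "hello, world" as one contiguous []byte

        s.Free() // drop the slice's references so pooled memory can be reused
    }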
+func (s BufferSlice) Free() { + for _, b := range s { + b.Free() + } +} + +// CopyTo copies each of the underlying Buffer's data into the given buffer, +// returning the number of bytes copied. Has the same semantics as the copy +// builtin in that it will copy as many bytes as it can, stopping when either dst +// is full or s runs out of data, returning the minimum of s.Len() and len(dst). +func (s BufferSlice) CopyTo(dst []byte) int { + off := 0 + for _, b := range s { + off += copy(dst[off:], b.ReadOnlyData()) + } + return off +} + +// Materialize concatenates all the underlying Buffer's data into a single +// contiguous buffer using CopyTo. +func (s BufferSlice) Materialize() []byte { + l := s.Len() + if l == 0 { + return nil + } + out := make([]byte, l) + s.CopyTo(out) + return out +} + +// MaterializeToBuffer functions like Materialize except that it writes the data +// to a single Buffer pulled from the given BufferPool. +// +// As a special case, if the input BufferSlice only actually has one Buffer, this +// function simply increases the refcount before returning said Buffer. Freeing this +// buffer won't release it until the BufferSlice is itself released. +func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { + if len(s) == 1 { + s[0].Ref() + return s[0] + } + sLen := s.Len() + if sLen == 0 { + return emptyBuffer{} + } + buf := pool.Get(sLen) + s.CopyTo(*buf) + return NewBuffer(buf, pool) +} + +// Reader returns a new Reader for the input slice after taking references to +// each underlying buffer. +func (s BufferSlice) Reader() Reader { + s.Ref() + return &sliceReader{ + data: s, + len: s.Len(), + } +} + +// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface +// with other parts systems. It also provides an additional convenience method +// Remaining(), which returns the number of unread bytes remaining in the slice. +// Buffers will be freed as they are read. +type Reader interface { + io.Reader + io.ByteReader + // Close frees the underlying BufferSlice and never returns an error. Subsequent + // calls to Read will return (0, io.EOF). + Close() error + // Remaining returns the number of unread bytes remaining in the slice. + Remaining() int +} + +type sliceReader struct { + data BufferSlice + len int + // The index into data[0].ReadOnlyData(). + bufferIdx int +} + +func (r *sliceReader) Remaining() int { + return r.len +} + +func (r *sliceReader) Close() error { + r.data.Free() + r.data = nil + r.len = 0 + return nil +} + +func (r *sliceReader) freeFirstBufferIfEmpty() bool { + if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { + return false + } + + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + return true +} + +func (r *sliceReader) Read(buf []byte) (n int, _ error) { + if r.len == 0 { + return 0, io.EOF + } + + for len(buf) != 0 && r.len != 0 { + // Copy as much as possible from the first Buffer in the slice into the + // given byte slice. + data := r.data[0].ReadOnlyData() + copied := copy(buf, data[r.bufferIdx:]) + r.len -= copied // Reduce len by the number of bytes copied. + r.bufferIdx += copied // Increment the buffer index. + n += copied // Increment the total number of bytes read. + buf = buf[copied:] // Shrink the given byte slice. + + // If we have copied all the data from the first Buffer, free it and advance to + // the next in the slice. 
+ r.freeFirstBufferIfEmpty() + } + + return n, nil +} + +func (r *sliceReader) ReadByte() (byte, error) { + if r.len == 0 { + return 0, io.EOF + } + + // There may be any number of empty buffers in the slice, clear them all until a + // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0. + for r.freeFirstBufferIfEmpty() { + } + + b := r.data[0].ReadOnlyData()[r.bufferIdx] + r.len-- + r.bufferIdx++ + // Free the first buffer in the slice if the last byte was read + r.freeFirstBufferIfEmpty() + return b, nil +} + +var _ io.Writer = (*writer)(nil) + +type writer struct { + buffers *BufferSlice + pool BufferPool +} + +func (w *writer) Write(p []byte) (n int, err error) { + b := Copy(p, w.pool) + *w.buffers = append(*w.buffers, b) + return b.Len(), nil +} + +// NewWriter wraps the given BufferSlice and BufferPool to implement the +// io.Writer interface. Every call to Write copies the contents of the given +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go new file mode 100644 index 0000000000..4d66b2ccc2 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. 
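A sketch of the io interop helpers, not part of the vendored code: mem.NewWriter accumulates writes into a BufferSlice, and BufferSlice.Reader exposes the data as an io.Reader that frees buffers as they are drained. Only APIs shown in this diff are used.

    package main

    import (
        "fmt"
        "io"

        "google.golang.org/grpc/mem"
    )

    func main() {
        pool := mem.DefaultBufferPool()

        // Each Write copies its input into a new Buffer appended to s.
        var s mem.BufferSlice
        w := mem.NewWriter(&s, pool)
        io.WriteString(w, "hello, ")
        io.WriteString(w, "world")

        // Reader takes its own references, so the slice stays usable afterwards.
        r := s.Reader()
        data, _ := io.ReadAll(r)
        fmt.Printf("%s (remaining: %d)\n", data, r.Remaining()) // hello, world (remaining: 0)
        r.Close()

        s.Free() // release the references still held by the slice itself
    }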
+ Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. +func NewBuffer(data *[]byte, pool BufferPool) Buffer { + if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. +func Copy(data []byte, pool BufferPool) Buffer { + if IsBelowBufferPoolingThreshold(len(data)) { + buf := make(SliceBuffer, len(data)) + copy(buf, data) + return buf + } + + buf := pool.Get(len(data)) + copy(*buf, data) + return NewBuffer(buf, pool) +} + +func (b *buffer) ReadOnlyData() []byte { + if b.refs == nil { + panic("Cannot read freed buffer") + } + return b.data +} + +func (b *buffer) Ref() { + if b.refs == nil { + panic("Cannot ref freed buffer") + } + b.refs.Add(1) +} + +func (b *buffer) Free() { + if b.refs == nil { + panic("Cannot free freed buffer") + } + + refs := b.refs.Add(-1) + switch { + case refs > 0: + return + case refs == 0: + if b.pool != nil { + b.pool.Put(b.origData) + } + + refObjectPool.Put(b.refs) + b.origData = nil + b.data = nil + b.refs = nil + b.pool = nil + bufferObjectPool.Put(b) + default: + panic("Cannot free freed buffer") + } +} + +func (b *buffer) Len() int { + return len(b.ReadOnlyData()) +} + +func (b *buffer) split(n int) (Buffer, Buffer) { + if b.refs == nil { + panic("Cannot split freed buffer") + } + + b.refs.Add(1) + split := newBuffer() + split.origData = b.origData + split.data = b.data[n:] + split.refs = b.refs + split.pool = b.pool + + b.data = b.data[:n] + + return b, split +} + +func (b *buffer) read(buf []byte) (int, Buffer) { + if b.refs == nil { + panic("Cannot read freed buffer") + } + + n := copy(buf, b.data) + if n == len(b.data) { + b.Free() + return n, nil + } + + b.data = b.data[n:] + return n, b +} + +// String returns a string representation of the buffer. May be used for +// debugging purposes. 
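A sketch of the reference-counting contract described above; the names and sizes are arbitrary. Copy pins the bytes in a pooled Buffer, Ref hands a second reference to a worker goroutine, and the backing array returns to the pool only when the last Free runs.

    package main

    import (
        "fmt"
        "sync"

        "google.golang.org/grpc/mem"
    )

    func main() {
        pool := mem.DefaultBufferPool()

        // 2 KiB is above the pooling threshold, so this Buffer is pooled and
        // reference counted rather than a plain SliceBuffer.
        buf := mem.Copy(make([]byte, 2<<10), pool) // refcount == 1

        var wg sync.WaitGroup
        wg.Add(1)
        buf.Ref() // refcount == 2; the goroutine owns this reference
        go func() {
            defer wg.Done()
            defer buf.Free() // releases the goroutine's reference
            fmt.Println("worker sees", len(buf.ReadOnlyData()), "bytes")
        }()

        wg.Wait()
        buf.Free() // last reference; the backing array goes back to the pool
    }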
+func (b *buffer) String() string { + return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) +} + +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { + return buf.read(dst) +} + +// SplitUnsafe modifies the receiver to point to the first n bytes while it +// returns a new reference to the remaining bytes. The returned Buffer functions +// just like a normal reference acquired using Ref(). +func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { + return buf.split(n) +} + +type emptyBuffer struct{} + +func (e emptyBuffer) ReadOnlyData() []byte { + return nil +} + +func (e emptyBuffer) Ref() {} +func (e emptyBuffer) Free() {} + +func (e emptyBuffer) Len() int { + return 0 +} + +func (e emptyBuffer) split(int) (left, right Buffer) { + return e, e +} + +func (e emptyBuffer) read([]byte) (int, Buffer) { + return 0, e +} + +type SliceBuffer []byte + +func (s SliceBuffer) ReadOnlyData() []byte { return s } +func (s SliceBuffer) Ref() {} +func (s SliceBuffer) Free() {} +func (s SliceBuffer) Len() int { return len(s) } + +func (s SliceBuffer) split(n int) (left, right Buffer) { + return s[:n], s[n:] +} + +func (s SliceBuffer) read(buf []byte) (int, Buffer) { + n := copy(buf, s) + if n == len(s) { + return n, nil + } + return n, s[n:] +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 6c01a9b359..d2e15253bb 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -90,21 +90,6 @@ func Pairs(kv ...string) MD { return md } -// String implements the Stringer interface for pretty-printing a MD. -// Ordering of the values is non-deterministic as it ranges over a map. -func (md MD) String() string { - var sb strings.Builder - fmt.Fprintf(&sb, "MD{") - for k, v := range md { - if sb.Len() > 3 { - fmt.Fprintf(&sb, ", ") - } - fmt.Fprintf(&sb, "%s=[%s]", k, strings.Join(v, ", ")) - } - fmt.Fprintf(&sb, "}") - return sb.String() -} - // Len returns the number of items in md. func (md MD) Len() int { return len(md) @@ -228,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Keys are matched in a case insensitive // manner. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func ValueFromIncomingContext(ctx context.Context, key string) []string { md, ok := ctx.Value(mdIncomingKey{}).(MD) if !ok { @@ -243,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // Case insenitive comparison: MD is a map, and there's no guarantee + // Case insensitive comparison: MD is a map, and there's no guarantee // that the MD attached to the context is created using our helper // functions. 
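With the experimental notice dropped, server code can call ValueFromIncomingContext directly. A brief sketch; the interceptor and header name are hypothetical and not taken from this diff.

    package main

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/metadata"
    )

    // authInterceptor is a hypothetical unary interceptor that reads a header
    // from the incoming metadata; key matching is case insensitive.
    func authInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
        if tokens := metadata.ValueFromIncomingContext(ctx, "authorization"); len(tokens) > 0 {
            _ = tokens[0] // validate the token here
        }
        return handler(ctx, req)
    }

    func main() {
        _ = grpc.NewServer(grpc.ChainUnaryInterceptor(authInterceptor))
    }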
if strings.EqualFold(k, key) { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 56e8aba783..bdaa2130e4 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -22,7 +22,7 @@ import ( "context" "fmt" "io" - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" @@ -33,35 +33,43 @@ import ( "google.golang.org/grpc/status" ) +// pickerGeneration stores a picker and a channel used to signal that a picker +// newer than this one is available. +type pickerGeneration struct { + // picker is the picker produced by the LB policy. May be nil if a picker + // has never been produced. + picker balancer.Picker + // blockingCh is closed when the picker has been invalidated because there + // is a new one available. + blockingCh chan struct{} +} + // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker + // If pickerGen holds a nil pointer, the pickerWrapper is closed. + pickerGen atomic.Pointer[pickerGeneration] statsHandlers []stats.Handler // to record blocking picker calls } func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - return &pickerWrapper{ - blockingCh: make(chan struct{}), + pw := &pickerWrapper{ statsHandlers: statsHandlers, } + pw.pickerGen.Store(&pickerGeneration{ + blockingCh: make(chan struct{}), + }) + return pw } -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +// updatePicker is called by UpdateState calls from the LB policy. It +// unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() - return - } - pw.picker = p - // pw.blockingCh should never be nil. - close(pw.blockingCh) - pw.blockingCh = make(chan struct{}) - pw.mu.Unlock() + old := pw.pickerGen.Swap(&pickerGeneration{ + picker: p, + blockingCh: make(chan struct{}), + }) + close(old.blockingCh) } // doneChannelzWrapper performs the following: @@ -98,20 +106,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var lastPickErr error for { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() + pg := pw.pickerGen.Load() + if pg == nil { return nil, balancer.PickResult{}, ErrClientConnClosing } - - if pw.picker == nil { - ch = pw.blockingCh + if pg.picker == nil { + ch = pg.blockingCh } - if ch == pw.blockingCh { + if ch == pg.blockingCh { // This could happen when either: // - pw.picker is nil (the previous if condition), or - // - has called pick on the current picker. - pw.mu.Unlock() + // - we have already called pick on the current picker. select { case <-ctx.Done(): var errStr string @@ -145,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } } - ch = pw.blockingCh - p := pw.picker - pw.mu.Unlock() + ch = pg.blockingCh + p := pg.picker pickResult, err := p.Pick(info) if err != nil { @@ -197,24 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } func (pw *pickerWrapper) close() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.done = true - close(pw.blockingCh) + old := pw.pickerGen.Swap(nil) + close(old.blockingCh) } // reset clears the pickerWrapper and prepares it for being used again when idle // mode is exited. 
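The mutex in pickerWrapper is replaced by atomic swaps over immutable generations. A generic sketch of that pattern, with illustrative names that are not part of gRPC: readers load the current generation, and an update installs a new one and closes the old generation's channel to wake anyone waiting on it.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // generation is an immutable snapshot: a value plus a channel closed when
    // a newer generation replaces it.
    type generation struct {
        value   string
        staleCh chan struct{}
    }

    type holder struct {
        gen atomic.Pointer[generation]
    }

    func newHolder(v string) *holder {
        h := &holder{}
        h.gen.Store(&generation{value: v, staleCh: make(chan struct{})})
        return h
    }

    // update installs a new generation and unblocks waiters on the old one.
    func (h *holder) update(v string) {
        old := h.gen.Swap(&generation{value: v, staleCh: make(chan struct{})})
        close(old.staleCh)
    }

    func main() {
        h := newHolder("v1")
        g := h.gen.Load()
        h.update("v2")
        <-g.staleCh                     // returns immediately: g was superseded
        fmt.Println(h.gen.Load().value) // v2
    }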
func (pw *pickerWrapper) reset() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.blockingCh = make(chan struct{}) + old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})}) + close(old.blockingCh) } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 73bd633643..e87a17f36a 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -20,6 +20,7 @@ package grpc import ( "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -31,9 +32,10 @@ import ( // later release. type PreparedMsg struct { // Struct for preparing msg before sending them - encodedData []byte + encodedData mem.BufferSlice hdr []byte - payload []byte + payload mem.BufferSlice + pf payloadFormat } // Encode marshalls and compresses the message using the codec and compressor for the stream. @@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if err != nil { return err } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + + materializedData := data.Materialize() + data.Free() + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + + // TODO: it should be possible to grab the bufferPool from the underlying + // stream implementation with a type cast to its actual type (such as + // addrConnStream) and accessing the buffer pool directly. + var compData mem.BufferSlice + compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) if err != nil { return err } - p.hdr, p.payload = msgHeader(data, compData) + + if p.pf.isCompressed() { + materializedCompData := compData.Materialize() + compData.Free() + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + } + + p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) + return nil } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go index 6c49c2333b..e1f58104d8 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,8 +21,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/reflection/v1/reflection.proto package grpc_reflection_v1 @@ -789,7 +789,7 @@ func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { } var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ +var file_grpc_reflection_v1_reflection_proto_goTypes = []any{ (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse @@ -822,7 +822,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ServerReflectionRequest); i { case 0: return &v.state @@ -834,7 +834,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRequest); i { case 0: return &v.state @@ -846,7 +846,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerReflectionResponse); i { case 0: return &v.state @@ -858,7 +858,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorResponse); i { case 0: return &v.state @@ -870,7 +870,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ExtensionNumberResponse); i { case 0: return &v.state @@ -882,7 +882,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*ListServiceResponse); i { case 0: return &v.state @@ -894,7 +894,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ServiceResponse); i { case 0: return &v.state @@ -906,7 +906,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { return nil } } - file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := 
v.(*ErrorResponse); i { case 0: return &v.state @@ -919,14 +919,14 @@ func file_grpc_reflection_v1_reflection_proto_init() { } } } - file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), (*ServerReflectionRequest_FileContainingExtension)(nil), (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), (*ServerReflectionRequest_ListServices)(nil), } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []any{ (*ServerReflectionResponse_FileDescriptorResponse)(nil), (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), (*ServerReflectionResponse_ListServicesResponse)(nil), diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go index 6e544f81e4..0310828076 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -21,8 +21,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/reflection/v1/reflection.proto package grpc_reflection_v1 @@ -36,8 +36,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" @@ -49,7 +49,7 @@ const ( type ServerReflectionClient interface { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. - ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) } type serverReflectionClient struct { @@ -60,54 +60,39 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie return &serverReflectionClient{cc} } -func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} + x := &grpc.GenericClientStream[ServerReflectionRequest, ServerReflectionResponse]{ClientStream: stream} return x, nil } -type ServerReflection_ServerReflectionInfoClient interface { - Send(*ServerReflectionRequest) error - Recv() (*ServerReflectionResponse, error) - grpc.ClientStream -} - -type serverReflectionServerReflectionInfoClient struct { - grpc.ClientStream -} - -func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { - m := new(ServerReflectionResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ServerReflection_ServerReflectionInfoClient = grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse] // ServerReflectionServer is the server API for ServerReflection service. // All implementations should embed UnimplementedServerReflectionServer -// for forward compatibility +// for forward compatibility. type ServerReflectionServer interface { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. - ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error + ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error } -// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. -type UnimplementedServerReflectionServer struct { -} +// UnimplementedServerReflectionServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedServerReflectionServer struct{} -func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { +func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error { return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") } +func (UnimplementedServerReflectionServer) testEmbeddedByValue() {} // UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ServerReflectionServer will @@ -117,34 +102,22 @@ type UnsafeServerReflectionServer interface { } func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + // If the following call panics, it indicates UnimplementedServerReflectionServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
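Because the old named stream types are now aliases for the generic grpc.BidiStreamingClient and grpc.BidiStreamingServer, existing callers keep compiling. A client-side sketch listing services over reflection; the dial target is made up and the oneof field names are assumed to match the generated v1 reflection package.

    package main

    import (
        "context"
        "fmt"
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        v1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
    )

    func main() {
        conn, err := grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        // The returned stream still satisfies the old named interface, which is
        // now a type alias for the generic stream type.
        var stream v1.ServerReflection_ServerReflectionInfoClient
        stream, err = v1.NewServerReflectionClient(conn).ServerReflectionInfo(context.Background())
        if err != nil {
            log.Fatal(err)
        }

        req := &v1.ServerReflectionRequest{
            MessageRequest: &v1.ServerReflectionRequest_ListServices{ListServices: ""},
        }
        if err := stream.Send(req); err != nil {
            log.Fatal(err)
        }
        resp, err := stream.Recv()
        if err != nil {
            log.Fatal(err)
        }
        for _, svc := range resp.GetListServicesResponse().GetService() {
            fmt.Println(svc.GetName())
        }
    }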
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&ServerReflection_ServiceDesc, srv) } func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) -} - -type ServerReflection_ServerReflectionInfoServer interface { - Send(*ServerReflectionResponse) error - Recv() (*ServerReflectionRequest, error) - grpc.ServerStream -} - -type serverReflectionServerReflectionInfoServer struct { - grpc.ServerStream + return srv.(ServerReflectionServer).ServerReflectionInfo(&grpc.GenericServerStream[ServerReflectionRequest, ServerReflectionResponse]{ServerStream: stream}) } -func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { - m := new(ServerReflectionRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ServerReflection_ServerReflectionInfoServer = grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse] // ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 909b24a19d..0582e16af2 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha @@ -403,7 +403,7 @@ type ServerReflectionResponse_FileDescriptorResponse struct { } type ServerReflectionResponse_AllExtensionNumbersResponse struct { - // This message is used to answer all_extension_numbers_of_type requst. + // This message is used to answer all_extension_numbers_of_type request. // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` @@ -864,7 +864,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { } var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ +var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []any{ (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse @@ -897,7 +897,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ServerReflectionRequest); i { case 0: return &v.state @@ -909,7 +909,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRequest); i { case 0: return &v.state @@ -921,7 +921,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerReflectionResponse); i { case 0: return &v.state @@ -933,7 +933,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorResponse); i { case 0: return &v.state @@ -945,7 +945,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ExtensionNumberResponse); i { case 0: return &v.state @@ -957,7 +957,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*ListServiceResponse); i { case 0: return &v.state @@ -969,7 +969,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ServiceResponse); i { case 0: return &v.state @@ -981,7 +981,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) 
interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ErrorResponse); i { case 0: return &v.state @@ -994,14 +994,14 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { } } } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), (*ServerReflectionRequest_FileContainingExtension)(nil), (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), (*ServerReflectionRequest_ListServices)(nil), } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []any{ (*ServerReflectionResponse_FileDescriptorResponse)(nil), (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), (*ServerReflectionResponse_ListServicesResponse)(nil), diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 3de5dc354f..80755d74d7 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha @@ -33,8 +33,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" @@ -46,7 +46,7 @@ const ( type ServerReflectionClient interface { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. - ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) } type serverReflectionClient struct { @@ -57,54 +57,39 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie return &serverReflectionClient{cc} } -func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} + x := &grpc.GenericClientStream[ServerReflectionRequest, ServerReflectionResponse]{ClientStream: stream} return x, nil } -type ServerReflection_ServerReflectionInfoClient interface { - Send(*ServerReflectionRequest) error - Recv() (*ServerReflectionResponse, error) - grpc.ClientStream -} - -type serverReflectionServerReflectionInfoClient struct { - grpc.ClientStream -} - -func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { - m := new(ServerReflectionResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ServerReflection_ServerReflectionInfoClient = grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse] // ServerReflectionServer is the server API for ServerReflection service. // All implementations should embed UnimplementedServerReflectionServer -// for forward compatibility +// for forward compatibility. type ServerReflectionServer interface { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. - ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error + ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error } -// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. -type UnimplementedServerReflectionServer struct { -} +// UnimplementedServerReflectionServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedServerReflectionServer struct{} -func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { +func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error { return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") } +func (UnimplementedServerReflectionServer) testEmbeddedByValue() {} // UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ServerReflectionServer will @@ -114,34 +99,22 @@ type UnsafeServerReflectionServer interface { } func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + // If the following call panics, it indicates UnimplementedServerReflectionServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&ServerReflection_ServiceDesc, srv) } func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) -} - -type ServerReflection_ServerReflectionInfoServer interface { - Send(*ServerReflectionResponse) error - Recv() (*ServerReflectionRequest, error) - grpc.ServerStream -} - -type serverReflectionServerReflectionInfoServer struct { - grpc.ServerStream + return srv.(ServerReflectionServer).ServerReflectionInfo(&grpc.GenericServerStream[ServerReflectionRequest, ServerReflectionResponse]{ServerStream: stream}) } -func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { - m := new(ServerReflectionRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ServerReflection_ServerReflectionInfoServer = grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse] // ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/reflection/internal/internal.go b/vendor/google.golang.org/grpc/reflection/internal/internal.go index 36ee650750..902fc6d35c 100644 --- a/vendor/google.golang.org/grpc/reflection/internal/internal.go +++ b/vendor/google.golang.org/grpc/reflection/internal/internal.go @@ -18,7 +18,7 @@ // Package internal contains code that is shared by both reflection package and // the test package. The packages are split in this way inorder to avoid -// depenedency to deprecated package github.com/golang/protobuf. +// dependency to deprecated package github.com/golang/protobuf. package internal import ( diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index 3edca296c2..0000000000 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . 
-name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) - -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). -OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." 
\ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index f2efa2a2cb..09e864a89d 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.BuildCallback(target, cc, opts) r.mu.Lock() defer r.mu.Unlock() + // Call BuildCallback after locking to avoid a race when UpdateState + // or ReportError is called before Build returns. + r.BuildCallback(target, cc, opts) r.CC = cc if r.lastSeenState != nil { err := r.CC.UpdateState(*r.lastSeenState) diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 9dcc9780f8..23bb3fb258 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.Schedule(func(context.Context) { + ccr.serializer.TrySchedule(func(context.Context) { if ccr.resolver == nil { return } @@ -171,12 +171,15 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // ParseServiceConfig is called by resolver implementations to parse a JSON // representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) + return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts) } // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. 
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fdd49e6e91..2d96f1405e 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" @@ -35,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,24 +264,20 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. 
The error passed to the callback is the status of the @@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. The result of calling Name() will @@ -515,10 +511,50 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -540,10 +576,10 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -581,19 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -608,14 +653,15 @@ type parser struct { // - an error from the status package // // No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible +// that the underlying streamReader must not return an incompatible // error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { return 0, nil, err } - pf = payloadFormat(p.header[0]) + pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { @@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", length, maxReceiveMessageSize) } - msg = p.recvBufferPool.Get(int(length)) - if _, err := p.r.Read(msg); err != nil { + + data, err := p.r.Read(int(length)) + if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, msg, nil + return pf, data, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(len(b)) > math.MaxUint32 { + if uint(b.Len()) > math.MaxUint32 { + b.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil @@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) { // indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. -func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -697,33 +752,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData } else { - hdr[0] = byte(compressionNone) + length = uint32(data.Len()) + payload = data } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload } -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -731,7 +789,11 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } else { + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -741,104 +803,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool type payloadInfo struct { compressedLength int // The compressed length got from wire. - uncompressedBytes []byte + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } } // recvAndDecompress reads a message from the stream, decompressing it if necessary. // // Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as // the buffer is no longer needed. -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, -) (uncompressedBuf []byte, cancel func(), err error) { - pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) +// TODO: Refactor this function to reduce the number of arguments. 
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, nil, err + return nil, err } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, nil, st.Err() + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() } var size int - if pf == compressionMade { + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } size = len(uncompressedBuf) } else { - uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) } if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { + out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - uncompressedBuf = compressedBuf + out = compressed } if payInfo != nil { - payInfo.compressedLength = len(compressedBuf) - payInfo.uncompressedBytes = uncompressedBuf - - cancel = func() {} - } else { - cancel = func() { - p.recvBufferPool.Put(&compressedBuf) - } + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out } - return uncompressedBuf, cancel, nil + return out, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) if err != nil { return nil, 0, err } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. 
- // - // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // we can also utilize the recv buffer pool here. - buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return buf.Bytes(), int(bytesRead), err - } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? + //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return d, len(d), err + return out, out.Len(), nil } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err } - defer cancel() - if err := c.Unmarshal(buf, m); err != nil { + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. + defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } + return nil } @@ -941,7 +1028,7 @@ func setCallInfoCodec(c *callInfo) error { // encoding.Codec (Name vs. String method name). We only support // setting content subtype from encoding.Codec to avoid a behavior // change with the deprecated version. - if ec, ok := c.codec.(encoding.Codec); ok { + if ec, ok := c.codec.(encoding.CodecV2); ok { c.contentSubtype = strings.ToLower(ec.Name()) } } @@ -950,12 +1037,12 @@ func setCallInfoCodec(c *callInfo) error { if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. 
- c.codec = encoding.GetCodec(proto.Name) + c.codec = getCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) + c.codec = getCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 89f8e4792b..d1e1415a40 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -80,7 +81,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool + internal.BufferPool = bufferPool } var statusOK = status.New(codes.OK, "") @@ -170,7 +171,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - recvBufferPool SharedBufferPool + bufferPool mem.BufferPool waitForHandlers bool } @@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, + bufferPool: mem.DefaultBufferPool(), } var globalServerOptions []ServerOption @@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV0Bridge(codec) }) } @@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption { // later release. func ForceServerCodec(codec encoding.Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 }) } @@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption { }) } -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { - return recvBufferPool(bufferPool) -} - -func recvBufferPool(bufferPool SharedBufferPool) ServerOption { +func bufferPool(bufferPool mem.BufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.recvBufferPool = bufferPool + o.bufferPool = bufferPool }) } @@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorkers blocks on a *transport.Stream channel forever and waits for +// serverWorker blocks on a *transport.Stream channel forever and waits for // data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). @@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } - compData, err := compress(data, cp, comp) + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) if err != nil { + data.Free() channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } - hdr, payload := msgHeader(data, compData) + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) if err == nil { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } } } return err @@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var payInfo *payloadInfo if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } + defer d.Free() if channelz.IsOn() { t.IncrMsgRecv() } df := func(v any) error { - defer cancel() - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } + for _, sh := range shs { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, - Length: len(d), + Length: d.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Data: d, }) } if len(binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: d, + Message: d.Materialize(), } for _, binlog := range binlogs { binlog.Log(ctx, cm) @@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx: ctx, t: t, s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } - codec := encoding.GetCodec(contentSubtype) + codec := getCodec(contentSubtype) if codec == nil { logger.Warningf("Unsupported codec %q. Defaulting to %q for now. 
This will start to fail in future releases.", contentSubtype, proto.Name) - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } return codec } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 9da8fc8027..2671c5ef69 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" @@ -163,9 +164,11 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfig = parseServiceConfig + internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult { + return parseServiceConfig(js, defaultMaxCallAttempts) + } } -func parseServiceConfig(js string) *serviceconfig.ParseResult { +func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} } @@ -183,12 +186,12 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { } c := rsc.LoadBalancingConfig if c == nil { - name := PickFirstBalancerName + name := pickfirst.Name if rsc.LoadBalancingPolicy != nil { name = *rsc.LoadBalancingPolicy } if balancer.Get(name) == nil { - name = PickFirstBalancerName + name = pickfirst.Name } cfg := []map[string]any{{name: struct{}{}}} strCfg, err := json.Marshal(cfg) @@ -218,7 +221,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { WaitForReady: m.WaitForReady, Timeout: (*time.Duration)(m.Timeout), } - if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil { logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -264,7 +267,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } @@ -278,17 +281,16 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol return nil, nil } + if jrp.MaxAttempts < maxAttempts { + maxAttempts = jrp.MaxAttempts + } rp := &internalserviceconfig.RetryPolicy{ - MaxAttempts: jrp.MaxAttempts, + MaxAttempts: maxAttempts, InitialBackoff: time.Duration(jrp.InitialBackoff), MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } - if rp.MaxAttempts > 5 { - // TODO(retry): Make the max maxAttempts configurable. - rp.MaxAttempts = 5 - } for _, code := range jrp.RetryableStatusCodes { rp.RetryableStatusCodes[code] = true } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go deleted file mode 100644 index 48a64cfe8e..0000000000 --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import "sync" - -// SharedBufferPool is a pool of buffers that can be shared, resulting in -// decreased memory allocation. Currently, in gRPC-go, it is only utilized -// for parsing incoming messages. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -type SharedBufferPool interface { - // Get returns a buffer with specified length from the pool. - // - // The returned byte slice may be not zero initialized. - Get(length int) []byte - - // Put returns a buffer to the pool. - Put(*[]byte) -} - -// NewSharedBufferPool creates a simple SharedBufferPool with buckets -// of different sizes to optimize memory usage. This prevents the pool from -// wasting large amounts of memory, even when handling messages of varying sizes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewSharedBufferPool() SharedBufferPool { - return &simpleSharedBufferPool{ - pools: [poolArraySize]simpleSharedBufferChildPool{ - newBytesPool(level0PoolMaxSize), - newBytesPool(level1PoolMaxSize), - newBytesPool(level2PoolMaxSize), - newBytesPool(level3PoolMaxSize), - newBytesPool(level4PoolMaxSize), - newBytesPool(0), - }, - } -} - -// simpleSharedBufferPool is a simple implementation of SharedBufferPool. -type simpleSharedBufferPool struct { - pools [poolArraySize]simpleSharedBufferChildPool -} - -func (p *simpleSharedBufferPool) Get(size int) []byte { - return p.pools[p.poolIdx(size)].Get(size) -} - -func (p *simpleSharedBufferPool) Put(bs *[]byte) { - p.pools[p.poolIdx(cap(*bs))].Put(bs) -} - -func (p *simpleSharedBufferPool) poolIdx(size int) int { - switch { - case size <= level0PoolMaxSize: - return level0PoolIdx - case size <= level1PoolMaxSize: - return level1PoolIdx - case size <= level2PoolMaxSize: - return level2PoolIdx - case size <= level3PoolMaxSize: - return level3PoolIdx - case size <= level4PoolMaxSize: - return level4PoolIdx - default: - return levelMaxPoolIdx - } -} - -const ( - level0PoolMaxSize = 16 // 16 B - level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B - level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB - level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB - level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB -) - -const ( - level0PoolIdx = iota - level1PoolIdx - level2PoolIdx - level3PoolIdx - level4PoolIdx - levelMaxPoolIdx - poolArraySize -) - -type simpleSharedBufferChildPool interface { - Get(size int) []byte - Put(any) -} - -type bufferPool struct { - sync.Pool - - defaultSize int -} - -func (p *bufferPool) Get(size int) []byte { - bs := p.Pool.Get().(*[]byte) - - if cap(*bs) < size { - p.Pool.Put(bs) - - return make([]byte, size) - } - - return (*bs)[:size] -} - -func newBytesPool(size int) simpleSharedBufferChildPool { - return &bufferPool{ - Pool: sync.Pool{ - New: func() any { - bs := make([]byte, size) - return &bs - }, - }, - defaultSize: size, - } -} - -// nopBufferPool is a buffer pool just makes new buffer without pooling. 
-type nopBufferPool struct { -} - -func (nopBufferPool) Get(length int) []byte { - return make([]byte, length) -} - -func (nopBufferPool) Put(*[]byte) { -} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index fdb0bd6518..71195c4943 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -77,9 +77,6 @@ type InPayload struct { // the call to HandleRPC which provides the InPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). @@ -150,9 +147,6 @@ type OutPayload struct { // the call to HandleRPC which provides the OutPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). Length int diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index b54563e81c..bb2b2a216c 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,6 +23,7 @@ import ( "errors" "io" "math" + "math/rand" "strconv" "sync" "time" @@ -34,13 +35,13 @@ import ( "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs.attempt = a return nil } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { return nil, err } @@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error { } a.s = s a.ctx = s.Context() - a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -566,10 +567,15 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - onCommit func() - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer + committed bool // active attempt committed for retry? 
+ onCommit func() + replayBuffer []replayOp // operations to replay on retry + replayBufferSize int // current size of replayBuffer +} + +type replayOp struct { + op func(a *csAttempt) error + cleanup func() } // csAttempt implements a single transport stream attempt within a @@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() { cs.onCommit() } cs.committed = true - cs.buffer = nil + for _, op := range cs.replayBuffer { + if op.cleanup != nil { + op.cleanup() + } + } + cs.replayBuffer = nil } func (cs *clientStream) commitAttempt() { @@ -699,7 +710,7 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if max := float64(rp.MaxBackoff); cur > max { cur = max } - dur = time.Duration(grpcrand.Int63n(int64(cur))) + dur = time.Duration(rand.Int63n(int64(cur))) cs.numRetriesSincePushback++ } @@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { // the stream is canceled. return err } - // Note that the first op in the replay buffer always sets cs.attempt + // Note that the first op in replayBuffer always sets cs.attempt // if it is able to pick a transport and create a stream. if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil @@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } - if len(cs.buffer) == 0 { + if len(cs.replayBuffer) == 0 { // For the first op, which controls creation of the stream and // assigns cs.attempt, we need to create a new attempt inline // before executing the first op. On subsequent ops, the attempt @@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { - for _, f := range cs.buffer { - if err := f(attempt); err != nil { + for _, f := range cs.replayBuffer { + if err := f.op(attempt); err != nil { return err } } return nil } -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { // Note: we still will buffer if retry is disabled (for transparent retries). if cs.committed { return } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.replayBufferSize += sz + if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() + cleanup() return } - cs.buffer = append(cs.buffer, op) + cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) } func (cs *clientStream) SendMsg(m any) (err error) { @@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), *cs.callInfo.maxSendMessageSize) + if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, data) + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. + onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if len(cs.binlogs) != 0 && err == nil { cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: data, + Message: data.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, cm) @@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error { var recvInfo *payloadInfo if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} + defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) @@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && err == nil { sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes, + Message: recvInfo.uncompressedBytes.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, sm) @@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error { // RecvMsg. This also matches historical behavior. return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) if len(cs.binlogs) != 0 { chc := &binarylog.ClientHalfClose{ OnClientSide: true, @@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { } return io.EOF } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } } if channelz.IsOn() { a.t.IncrMsgSent() @@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} + defer payInfo.free() } if !a.decompSet { @@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. 
a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr @@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } for _, sh := range a.statsHandlers { sh.HandleRPC(a.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, + Client: true, + RecvTime: time.Now(), + Payload: m, WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), + Length: payInfo.uncompressedBytes.Len(), }) } if channelz.IsOn() { @@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (a *csAttempt) finish(err error) { @@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// newClientStream creates a ClientStream with the specified transport, on the +// newNonRetryClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transpot. +// of using ac.transport. // // Main difference between this and ClientConn.NewStream: // - no retry @@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + // TODO(dfawley): should we be checking len(data) instead? - if len(payld) > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payld), *as.callInfo.maxSendMessageSize) + if payload.Len() > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. as.decompSet = true } - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err != nil { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr @@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (as *addrConnStream) finish(err error) { @@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) } if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() @@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } } sm := &binarylog.ServerMessage{ - Message: data, + Message: data.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, sm) @@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } } return nil @@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, }) @@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, + Message: payInfo.uncompressedBytes.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, cm) @@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned boolean indicates whether +// compression was made and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload if the returned boolean is +// false can lead to undefined behavior. +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil } // The input interface is not a prepared msg. 
// Marshal and Compress the data at this point data, err = encode(codec, m) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } - compData, err := compress(data, cp, comp) + compData, pf, err := compress(data, cp, comp, pool) if err != nil { - return nil, nil, nil, err + data.Free() + return nil, nil, nil, 0, err } - hdr, payload = msgHeader(data, compData) - return hdr, payload, data, nil + hdr, payload = msgHeader(data, compData, pf) + return hdr, data, payload, pf, nil } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go index 8b813529c0..0037fee0bd 100644 --- a/vendor/google.golang.org/grpc/stream_interfaces.go +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -22,15 +22,35 @@ package grpc // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } // ServerStreamingServer represents the server side of a server-streaming (one // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface { // message stream and the type of the unary response message. It is used in // generated code. type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. 
ClientStream } @@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface { // requests, one response) RPC. It is generic over both the type of the request // message stream and the type of the unary response message. It is used in // generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface { // request message stream and the type of the response message stream. It is // used in generated code. type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. ClientStream } @@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface { // (many requests, many responses) RPC. It is generic over both the type of the // request message stream and the type of the response message stream. It is // used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // Send sends a response message to the client. 
The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index e1806e7600..a96b6a6bff 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.64.0" +const Version = "1.67.1" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index f47902371a..bb2966e3b4 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -102,7 +102,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -114,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index a45f112bce..24bc98ac42 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d2b3ac031e..ea1d3e65a5 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) { // newSyntaxError returns an error with line and column information useful for // syntax errors. 
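Stepping back to the generic streaming interfaces documented in stream_interfaces.go above: a minimal, hypothetical client-streaming handler showing the Recv / SendAndClose contract those comments describe. The SumRequest and SumReply types are invented for illustration; only grpc.ClientStreamingServer and the io.EOF convention come from the vendored code.

package sketch

import (
	"io"

	"google.golang.org/grpc"
)

// Hypothetical messages; real handlers use protoc-generated types.
type (
	SumRequest struct{ Value int64 }
	SumReply   struct{ Total int64 }
)

// Sum consumes the whole request stream and then replies exactly once.
func Sum(stream grpc.ClientStreamingServer[SumRequest, SumReply]) error {
	var total int64
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			// The client called CloseAndRecv: send the single response and
			// return nil for an OK status.
			return stream.SendAndClose(&SumReply{Total: total})
		}
		if err != nil {
			return err // stream is no longer usable; just return
		}
		total += req.Value
	}
}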
-func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(pos) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e786d..099b2bf451 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index d96719829c..c2d6bd5265 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func New(f string, x ...interface{}) error { +func New(f string, x ...any) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. 
-func Wrap(err error, f string, x ...interface{}) error { +func Wrap(err error, f string, x ...any) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...interface{}) string { +func format(f string, x ...any) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index ece53bea32..df53ff40b2 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -383,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor { } return fd.L1.Message } +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 3bc3b1cdf8..8a57d60b08 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -534,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, + New: func() any { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 570181eb48..e56c91a8db 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -45,6 +45,11 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + fd.L1.Kind = protoreflect.MessageKind + } } // Default is resolved here since it depends on Enum being resolved. diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index f0e38c4ef4..ba83fea44c 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -68,7 +68,7 @@ type Builder struct { // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. - GoTypes []interface{} + GoTypes []any // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services. 
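On the blanket interface{} → any rewrites running through these protobuf files: since Go 1.18, any is a predeclared alias for interface{}, so the edits are purely cosmetic and the two spellings are interchangeable, as the tiny check below shows.

package sketch

// a and b have identical types; no conversion is needed in either direction.
var (
	a any         = "hello"
	b interface{} = a
	_ any         = b
)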
@@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []interface{} + goTypes []any depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 1447a11987..f30ab6b586 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -860,11 +860,13 @@ const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumValueOptions_Features_field_name protoreflect.Name = "features" EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) @@ -873,6 +875,7 @@ const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index a371f98de1..5d5771c2ed 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -22,13 +22,13 @@ type Export struct{} // NewError formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...interface{}) error { +func (Export) NewError(f string, x ...any) error { return errors.New(f, x...) } // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. -type enum = interface{} +type enum = any // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. @@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu // message is any message type generated by protoc-gen-go // and must be a pointer to a named struct type. -type message = interface{} +type message = any // legacyMessageWrapper wraps a v2 message as a v1 message. 
type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index bff041edc9..f29e6a8fa8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { } for _, x := range *ext { ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil { + if ei.funcs.isInit == nil || x.isUnexpandedLazy() { continue } v := x.Value() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 2b8f122c27..4bb0a7a20c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -99,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { return false } +// isUnexpandedLazy returns true if the ExensionField is lazy and not +// yet expanded, which means it's present and already checked for +// initialized required fields. +func (f *ExtensionField) isUnexpandedLazy() bool { + return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} + +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. +// +// The returned buffer has to be kept over whatever operation we're planning, +// as re-retrieving it will fail after the message is lazily decoded. +func (f *ExtensionField) lazyBuffer() []byte { + // This function might be in the critical path, so check the atomic without + // taking a look first, then only take the lock if needed. + if !f.isUnexpandedLazy() { + return nil + } + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + return f.lazy.b +} + func (f *ExtensionField) lazyInit() { f.lazy.mu.Lock() defer f.lazy.mu.Unlock() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index b7a23faf1e..7a16ec13dd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) } num, _ := protowire.DecodeTag(xi.wiretag) size += messageset.SizeField(num) + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + // Don't count the tag size in the extension buffer, it's already added. + size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize + continue + } + } size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } @@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma xi := getExtensionFieldInfo(x.Type()) num, _ := protowire.DecodeTag(xi.wiretag) b = messageset.AppendFieldStart(b, num) + + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. 
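A hedged usage-level note on the lazy-extension passthrough here: per the fullyLazyExtensions comment added later in this diff, requesting deterministic output forces lazy extensions to be expanded and re-encoded, whereas the default path may copy their wire bytes through unchanged. The snippet uses only the public proto API and makes no claim about byte-for-byte output.

package sketch

import "google.golang.org/protobuf/proto"

func marshalBoth(m proto.Message) (lazy, det []byte, err error) {
	// Default path: unexpanded lazy extensions may be passed through as raw wire bytes.
	if lazy, err = proto.Marshal(m); err != nil {
		return nil, nil, err
	}
	// Deterministic path: forces expansion so extension ordering is stable.
	det, err = proto.MarshalOptions{Deterministic: true}.Marshal(m)
	return lazy, det, err
}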
+ if lb := x.lazyBuffer(); lb != nil { + // The tag inside the lazy buffer is a different tag (the extension + // number), but what we need here is the tag for FieldMessage: + b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) + b = append(b, lb[xi.tagsize:]...) + b = messageset.AppendFieldEnd(b) + return b, nil + } + } + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 185ef2efa5..e06ece55a2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -14,7 +14,7 @@ import ( // unwrapper unwraps the value to the underlying value. // This is implemented by List and Map. type unwrapper interface { - protoUnwrap() interface{} + protoUnwrap() any } // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index f89136516f..18cb96fd70 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value { func (ls *listReflect) IsValid() bool { return !ls.v.IsNil() } -func (ls *listReflect) protoUnwrap() interface{} { +func (ls *listReflect) protoUnwrap() any { return ls.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index f30b0a0576..304244a651 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value { func (ms *mapReflect) IsValid() bool { return !ms.v.IsNil() } -func (ms *mapReflect) protoUnwrap() interface{} { +func (ms *mapReflect) protoUnwrap() any { return ms.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 845c67d6e7..febd212247 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. 
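Standalone helpers (illustrative only, not library code) restating the size+1 convention described in the sizecache comments above: a stored 0 means "no cached size", a stored n+1 means "cached size n", so a genuine size of 0 can still be cached.

package sketch

import (
	"math"
	"sync/atomic"
)

func storeCachedSize(cache *int32, size int) {
	if size > math.MaxInt32-1 {
		atomic.StoreInt32(cache, 0) // too large to cache; the size is recomputed at encode time
		return
	}
	atomic.StoreInt32(cache, int32(size+1))
}

func loadCachedSize(cache *int32) (size int, ok bool) {
	if v := atomic.LoadInt32(cache); v > 0 {
		return int(v - 1), true
	}
	return 0, false // zero value: nothing cached (or the size was too large to cache)
}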
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) } } return mi.sizePointerSlow(p, opts) @@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } return size } @@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { + if size > (math.MaxInt32 - 1) { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } } return size @@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. +func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) 
+ continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index cb25b0bae1..e31249f64f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} + ExtensionType any // Field is the field number of the extension. // @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { +func (xi *ExtensionInfo) IsValidInterface(v any) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c1c33d0057..81b2b1a763 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() interface{} { +func (e *legacyEnumWrapper) protoUnwrap() any { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 950e9a1fe7..bf0b6049b4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -216,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { for _, v := range vs { oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } @@ -567,6 +567,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() interface{} { +func (m aberrantMessage) protoUnwrap() any { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 629bacdced..019399d454 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -35,7 +35,7 @@ type MessageInfo struct { Exporter 
exporter // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} + OneofWrappers []any initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +47,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. It returns nil if it does not support // exporting the requested field (e.g., already exported). -type exporter func(v interface{}, i int) interface{} +type exporter func(v any, i int) any // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). @@ -201,7 +201,7 @@ fieldLoop: } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { oneofWrappers = vs } } @@ -256,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type + valType any // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index a6f0dbdade..ecb4623d70 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -20,7 +20,7 @@ type reflectMessageInfo struct { // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[protoreflect.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]any // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -28,7 +28,7 @@ type reflectMessageInfo struct { denseFields []*fieldInfo // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []interface{} // either *fieldInfo or *oneofInfo + rangeInfos []any // either *fieldInfo or *oneofInfo getUnknown func(pointer) protoreflect.RawFields setUnknown func(pointer, protoreflect.RawFields) @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]any) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -255,6 +255,10 @@ func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if !ok { return false } + if x.isUnexpandedLazy() { + // Avoid calling x.Value(), which triggers a lazy unmarshal. + return true + } switch { case xd.IsList(): return x.Value().List().Len() > 0 @@ -389,7 +393,7 @@ var ( // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. 
-func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -417,7 +421,7 @@ func (m *messageIfaceWrapper) Reset() { func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } -func (m *messageIfaceWrapper) protoUnwrap() interface{} { +func (m *messageIfaceWrapper) protoUnwrap() any { return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 29ba6bd355..99dc23c6f0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,7 +23,7 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() interface{} { +func (m *messageState) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { @@ -154,7 +154,7 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() interface{} { +func (m *messageReflectWrapper) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 517e94434c..da685e8a29 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -16,7 +16,7 @@ import ( const UnsafeEnabled = false // Pointer is an opaque pointer type. -type Pointer interface{} +type Pointer any // offset represents the offset to a struct field, accessible from a pointer. // The offset is the field index into a struct. @@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { return pointer{v: reflect.ValueOf(v)} } @@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 4b020e3116..5f20ca5d8a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. 
-func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { // TODO: Use tricky unsafe magic to directly create ifaceHeader. return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e5b..a1f09162d0 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a3cba50802..dbbf1f6862 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 34 - Patch = 1 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index c9c8721a69..d248f29284 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,7 +39,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -51,7 +51,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -78,7 +78,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over. 
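For context on the GetExtension / SetExtension / RangeExtensions signatures above switching to any, a small hypothetical usage. The extension descriptor and its int32 value type are assumptions standing in for protoc-generated code; the proto package calls themselves are the real API.

package sketch

import (
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
)

// xt stands in for a generated E_* extension descriptor whose Go type is int32.
func useExtensions(m proto.Message, xt protoreflect.ExtensionType) {
	proto.SetExtension(m, xt, int32(42)) // v is typed as any
	if proto.HasExtension(m, xt) {
		_ = proto.GetExtension(m, xt).(int32) // returns any; assert back to the concrete type
	}
	proto.RangeExtensions(m, func(x protoreflect.ExtensionType, val any) bool {
		return true // keep visiting every populated extension
	})
}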
if m == nil { return diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 254ca58542..f3cebab29c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + f.L1.Kind = protoreflect.MessageKind + } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index c629308675..6de31c2ebd 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -116,18 +116,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if m.ExtensionRanges().Len() > 0 { return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. - // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } } for j, fd := range md.GetField() { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 00102d3117..ea154eec44 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -485,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 5b80afe520..cd8fadbaf8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. 
- ValueOf(interface{}) Value + ValueOf(any) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,13 +519,13 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) interface{} + InterfaceOf(Value) any // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. - IsValidInterface(interface{}) bool + IsValidInterface(any) bool } // EnumDescriptor describes an enum and diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 7ced876f4e..75f83a2af0 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -32,11 +32,11 @@ const ( type value struct { pragma.DoNotCompare // 0B - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface any // 16B } func valueOfString(v string) Value { @@ -45,7 +45,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, bin: v} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { return Value{typ: ifaceType, iface: v} } @@ -55,6 +55,6 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return v.bin } -func (v Value) getIface() interface{} { +func (v Value) getIface() any { return v.iface } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 1603097311..9fe83cef5a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -69,8 +69,8 @@ import ( // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. Using an interface{} +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v interface{}) Value { +func ValueOf(v any) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an interface{}. +// Interface returns v as an any. // // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { +func (v Value) Interface() any { switch v.typ { case nilType: return nil @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an interface{}. 
-func (k MapKey) Interface() interface{} { +// Interface returns k as an any. +func (k MapKey) Interface() any { return Value(k).Interface() } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index b1fdbe3e8e..7f3583ead8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -45,7 +45,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index 4354701117..f7d386990a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -15,7 +15,7 @@ import ( type ( ifaceHeader struct { - _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. Type unsafe.Pointer Data unsafe.Pointer } @@ -37,7 +37,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -70,7 +70,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -81,7 +81,7 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return unsafe.Slice((*byte)(v.ptr), v.num) } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 6267dc52a6..de17773391 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -95,7 +95,7 @@ type Files struct { // multiple files. Only top-level declarations are registered. 
// Note that enum values are in the top-level since that are in the same // scope as the parent enum. - descsByName map[protoreflect.FullName]interface{} + descsByName map[protoreflect.FullName]any filesByPath map[string][]protoreflect.FileDescriptor numFiles int } @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { defer globalMutex.Unlock() } if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]interface{}{ + r.descsByName = map[protoreflect.FullName]any{ "": &packageDescriptor{}, } r.filesByPath = make(map[string][]protoreflect.FileDescriptor) @@ -485,7 +485,7 @@ type Types struct { } type ( - typesByName map[protoreflect.FullName]interface{} + typesByName map[protoreflect.FullName]any extensionsByMessage map[protoreflect.FullName]extensionsByNumber extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType ) @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { return nil } -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { name := desc.FullName() prev := r.typesByName[name] if prev != nil { @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p } } -func typeName(t interface{}) string { +func typeName(t any) string { switch t.(type) { case protoreflect.EnumType: return "enum" @@ -854,7 +854,7 @@ func typeName(t interface{}) string { } } -func amendErrorWithCaller(err error, prev, curr interface{}) error { +func amendErrorWithCaller(err error, prev, curr any) error { prevPkg := goPackage(prev) currPkg := goPackage(curr) if prevPkg == "" || currPkg == "" || prevPkg == currPkg { @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error { return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) } -func goPackage(v interface{}) string { +func goPackage(v any) string { switch d := v.(type) { case protoreflect.EnumType: v = d.Descriptor() diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 10c9030eb0..9403eb0750 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -3012,6 +3012,8 @@ type EnumValueOptions struct { // out when using debug formats, e.g. when the field contains sensitive // credentials. DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. 
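Similarly, the protoregistry hunks above only change the map value type to `any`; lookup behavior is unchanged. A small sketch of the registry API being touched (illustrative only; importing a generated package such as descriptorpb is what populates the global registries):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/descriptorpb" // self-registers descriptor.proto
)

func main() {
	// Files.descsByName (now map[protoreflect.FullName]any) backs this lookup.
	d, err := protoregistry.GlobalFiles.FindDescriptorByName("google.protobuf.EnumValueOptions")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.FullName())

	// Types keys Go-level enum/message/extension types by the same full name.
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.EnumValueOptions")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt.Descriptor().Fields().Len())
}
```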
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -3075,6 +3077,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool { return Default_EnumValueOptions_DebugRedact } +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -4706,7 +4715,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, @@ -4779,438 +4788,445 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, - 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, - 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, - 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, - 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x02, 
0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, - 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, - 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, - 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, - 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, - 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, - 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, - 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 
0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, - 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 
0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, - 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, - 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, - 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, - 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, - 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, - 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, - 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, - 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, - 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 
0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, - 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, - 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, - 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, - 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 
0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, + 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 
0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, + 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 
- 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, + 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 
0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 
0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, - 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 
0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, - 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, - 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x0a, 0x0a, - 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, - 0x3f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, - 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, - 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, - 0x6c, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, - 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x42, 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, - 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, - 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, - 0x07, 0x52, 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, - 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, - 
0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, - 0x45, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, - 0x26, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, - 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, - 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, - 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, - 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, - 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, - 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, - 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, - 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, - 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, - 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, - 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, - 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, - 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, - 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, - 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, - 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, - 0x55, 0x74, 0x66, 0x38, 
0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, - 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, - 0x03, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, - 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, - 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, - 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, - 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, - 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, - 0x2a, 0x06, 0x08, 0xea, 0x07, 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x86, 0x4e, 0x10, 0x87, 0x4e, - 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, - 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xd9, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, - 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, - 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, - 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, - 0xe2, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 
0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, + 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 
0x72, 0x65, 0x64, 0x61, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, + 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 
0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, - 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, - 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, - 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, - 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, - 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, - 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, - 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, + 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, + 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 
0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, + 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 
0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, + 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, + 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, + 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, + 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 
0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, + 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, + 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, + 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, + 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, + 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, + 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, + 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 
0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, + 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, + 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, + 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 
0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -5227,7 +5243,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ +var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type @@ -5329,38 +5345,39 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 49: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 50: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 51: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 52: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 53: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 54: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 55: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 56: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 57: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 58: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 59: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 60: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 61: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 62: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 63: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 64: google.protobuf.FeatureSetDefaults.maximum_edition:type_name 
-> google.protobuf.Edition - 48, // 65: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 66: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 67: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 68: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 69: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 75: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 76, // [76:76] is the sub-list for method output_type - 76, // [76:76] is the sub-list for method input_type - 76, // [76:76] is the sub-list for extension type_name - 76, // [76:76] is the sub-list for extension extendee - 0, // [0:76] is the sub-list for field type_name + 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> 
google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5369,7 +5386,7 @@ func file_google_protobuf_descriptor_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorSet); i { case 0: return &v.state @@ -5381,7 +5398,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorProto); i { case 0: return &v.state @@ -5393,7 +5410,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto); i { case 0: return &v.state @@ -5405,7 +5422,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions); i { case 0: return &v.state @@ -5419,7 +5436,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*FieldDescriptorProto); i { case 0: return &v.state @@ -5431,7 +5448,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + 
file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OneofDescriptorProto); i { case 0: return &v.state @@ -5443,7 +5460,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto); i { case 0: return &v.state @@ -5455,7 +5472,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*EnumValueDescriptorProto); i { case 0: return &v.state @@ -5467,7 +5484,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ServiceDescriptorProto); i { case 0: return &v.state @@ -5479,7 +5496,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*MethodDescriptorProto); i { case 0: return &v.state @@ -5491,7 +5508,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*FileOptions); i { case 0: return &v.state @@ -5505,7 +5522,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*MessageOptions); i { case 0: return &v.state @@ -5519,7 +5536,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions); i { case 0: return &v.state @@ -5533,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*OneofOptions); i { case 0: return &v.state @@ -5547,7 +5564,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*EnumOptions); i { case 0: return &v.state @@ -5561,7 +5578,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v 
:= v.(*EnumValueOptions); i { case 0: return &v.state @@ -5575,7 +5592,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ServiceOptions); i { case 0: return &v.state @@ -5589,7 +5606,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*MethodOptions); i { case 0: return &v.state @@ -5603,7 +5620,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption); i { case 0: return &v.state @@ -5615,7 +5632,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*FeatureSet); i { case 0: return &v.state @@ -5629,7 +5646,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state @@ -5641,7 +5658,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo); i { case 0: return &v.state @@ -5653,7 +5670,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state @@ -5665,7 +5682,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state @@ -5677,7 +5694,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state @@ -5689,7 +5706,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state @@ 
-5701,7 +5718,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state @@ -5713,7 +5730,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_EditionDefault); i { case 0: return &v.state @@ -5725,7 +5742,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_FeatureSupport); i { case 0: return &v.state @@ -5737,7 +5754,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state @@ -5749,7 +5766,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { case 0: return &v.state @@ -5761,7 +5778,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo_Location); i { case 0: return &v.state @@ -5773,7 +5790,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go index 39b024b46b..1ba1dfa5ad 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go @@ -294,7 +294,7 @@ func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { case fd.IsMap(): return protoreflect.ValueOfMap(&dynamicMap{ desc: fd, - mapv: make(map[interface{}]protoreflect.Value), + mapv: make(map[any]protoreflect.Value), }) case fd.IsList(): return protoreflect.ValueOfList(&dynamicList{desc: fd}) @@ -450,7 +450,7 @@ func (x *dynamicList) IsValid() bool { type dynamicMap struct { desc protoreflect.FieldDescriptor - mapv map[interface{}]protoreflect.Value + mapv map[any]protoreflect.Value } func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] } @@ -634,11 +634,11 @@ func newListEntry(fd protoreflect.FieldDescriptor) 
protoreflect.Value { // // The InterfaceOf and ValueOf methods of the extension type are defined as: // -// func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { +// func (xt extensionType) ValueOf(iv any) protoreflect.Value { // return protoreflect.ValueOf(iv) // } // -// func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { +// func (xt extensionType) InterfaceOf(v protoreflect.Value) any { // return v.Interface() // } // @@ -658,7 +658,7 @@ func (xt extensionType) New() protoreflect.Value { case xt.desc.IsMap(): return protoreflect.ValueOfMap(&dynamicMap{ desc: xt.desc, - mapv: make(map[interface{}]protoreflect.Value), + mapv: make(map[any]protoreflect.Value), }) case xt.desc.IsList(): return protoreflect.ValueOfList(&dynamicList{desc: xt.desc}) @@ -686,18 +686,18 @@ func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { return xt.desc } -func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { +func (xt extensionType) ValueOf(iv any) protoreflect.Value { v := protoreflect.ValueOf(iv) typecheck(xt.desc, v) return v } -func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { +func (xt extensionType) InterfaceOf(v protoreflect.Value) any { typecheck(xt.desc, v) return v.Interface() } -func (xt extensionType) IsValidInterface(iv interface{}) bool { +func (xt extensionType) IsValidInterface(iv any) bool { return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil } diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index b0df3fb334..a2ca940c50 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -90,27 +90,27 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc9, 0x01, 0x0a, 0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x7d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, 0x65, 0x18, 0x84, - 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, 0x07, 0xb2, 0x01, - 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, 0x20, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x53, 0x4f, - 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, - 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 
0x65, - 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x73, 0x6f, - 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, - 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, - 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x70, 0x62, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, } var ( @@ -126,7 +126,7 @@ func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { } var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_go_features_proto_goTypes = []interface{}{ +var file_google_protobuf_go_features_proto_goTypes = []any{ (*GoFeatures)(nil), // 0: pb.GoFeatures (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet } @@ -146,7 +146,7 @@ func file_google_protobuf_go_features_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GoFeatures); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 9de51be540..7172b43d38 100644 --- 
a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -445,7 +445,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte { } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ +var file_google_protobuf_any_proto_goTypes = []any{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -462,7 +462,7 @@ func file_google_protobuf_any_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Any); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index df709a8dd4..1b71bcd910 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -323,7 +323,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte { } var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []interface{}{ +var file_google_protobuf_duration_proto_goTypes = []any{ (*Duration)(nil), // 0: google.protobuf.Duration } var file_google_protobuf_duration_proto_depIdxs = []int32{ @@ -340,7 +340,7 @@ func file_google_protobuf_duration_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Duration); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 9a7277ba39..d87b4fb828 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -115,7 +115,7 @@ func file_google_protobuf_empty_proto_rawDescGZIP() []byte { } var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_empty_proto_goTypes = []interface{}{ +var file_google_protobuf_empty_proto_goTypes = []any{ (*Empty)(nil), // 0: google.protobuf.Empty } var file_google_protobuf_empty_proto_depIdxs = []int32{ @@ -132,7 +132,7 @@ func file_google_protobuf_empty_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index e8789cb331..ac1e91bb6d 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -537,7 +537,7 @@ func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { } var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var 
file_google_protobuf_field_mask_proto_goTypes = []interface{}{ +var file_google_protobuf_field_mask_proto_goTypes = []any{ (*FieldMask)(nil), // 0: google.protobuf.FieldMask } var file_google_protobuf_field_mask_proto_depIdxs = []int32{ @@ -554,7 +554,7 @@ func file_google_protobuf_field_mask_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FieldMask); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d2bac8b88e..d45361cbc7 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -49,11 +49,11 @@ // The standard Go "encoding/json" package has functionality to serialize // arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and // ListValue.AsSlice methods can convert the protobuf message representation into -// a form represented by interface{}, map[string]interface{}, and []interface{}. +// a form represented by any, map[string]any, and []any. // This form can be used with other packages that operate on such data structures // and also directly with the standard json package. // -// In order to convert the interface{}, map[string]interface{}, and []interface{} +// In order to convert the any, map[string]any, and []any // forms back as Value, Struct, and ListValue messages, use the NewStruct, // NewList, and NewValue constructor functions. // @@ -88,28 +88,28 @@ // // To construct a Value message representing the above JSON object: // -// m, err := structpb.NewValue(map[string]interface{}{ +// m, err := structpb.NewValue(map[string]any{ // "firstName": "John", // "lastName": "Smith", // "isAlive": true, // "age": 27, -// "address": map[string]interface{}{ +// "address": map[string]any{ // "streetAddress": "21 2nd Street", // "city": "New York", // "state": "NY", // "postalCode": "10021-3100", // }, -// "phoneNumbers": []interface{}{ -// map[string]interface{}{ +// "phoneNumbers": []any{ +// map[string]any{ // "type": "home", // "number": "212 555-1234", // }, -// map[string]interface{}{ +// map[string]any{ // "type": "office", // "number": "646 555-4567", // }, // }, -// "children": []interface{}{}, +// "children": []any{}, // "spouse": nil, // }) // if err != nil { @@ -197,7 +197,7 @@ type Struct struct { // NewStruct constructs a Struct from a general-purpose Go map. // The map keys must be valid UTF-8. // The map values are converted using NewValue. -func NewStruct(v map[string]interface{}) (*Struct, error) { +func NewStruct(v map[string]any) (*Struct, error) { x := &Struct{Fields: make(map[string]*Value, len(v))} for k, v := range v { if !utf8.ValidString(k) { @@ -214,9 +214,9 @@ func NewStruct(v map[string]interface{}) (*Struct, error) { // AsMap converts x to a general-purpose Go map. // The map values are converted by calling Value.AsInterface. 
-func (x *Struct) AsMap() map[string]interface{} { +func (x *Struct) AsMap() map[string]any { f := x.GetFields() - vs := make(map[string]interface{}, len(f)) + vs := make(map[string]any, len(f)) for k, v := range f { vs[k] = v.AsInterface() } @@ -306,13 +306,13 @@ type Value struct { // ║ float32, float64 │ stored as NumberValue ║ // ║ string │ stored as StringValue; must be valid UTF-8 ║ // ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]interface{} │ stored as StructValue ║ -// ║ []interface{} │ stored as ListValue ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ // ╚════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. -func NewValue(v interface{}) (*Value, error) { +func NewValue(v any) (*Value, error) { switch v := v.(type) { case nil: return NewNullValue(), nil @@ -342,13 +342,13 @@ func NewValue(v interface{}) (*Value, error) { case []byte: s := base64.StdEncoding.EncodeToString(v) return NewStringValue(s), nil - case map[string]interface{}: + case map[string]any: v2, err := NewStruct(v) if err != nil { return nil, err } return NewStructValue(v2), nil - case []interface{}: + case []any: v2, err := NewList(v) if err != nil { return nil, err @@ -396,7 +396,7 @@ func NewListValue(v *ListValue) *Value { // // Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are // converted as strings to remain compatible with MarshalJSON. -func (x *Value) AsInterface() interface{} { +func (x *Value) AsInterface() any { switch v := x.GetKind().(type) { case *Value_NumberValue: if v != nil { @@ -580,7 +580,7 @@ type ListValue struct { // NewList constructs a ListValue from a general-purpose Go slice. // The slice elements are converted using NewValue. -func NewList(v []interface{}) (*ListValue, error) { +func NewList(v []any) (*ListValue, error) { x := &ListValue{Values: make([]*Value, len(v))} for i, v := range v { var err error @@ -594,9 +594,9 @@ func NewList(v []interface{}) (*ListValue, error) { // AsSlice converts x to a general-purpose Go slice. // The slice elements are converted by calling Value.AsInterface. 
-func (x *ListValue) AsSlice() []interface{} { +func (x *ListValue) AsSlice() []any { vals := x.GetValues() - vs := make([]interface{}, len(vals)) + vs := make([]any, len(vals)) for i, v := range vals { vs[i] = v.AsInterface() } @@ -716,7 +716,7 @@ func file_google_protobuf_struct_proto_rawDescGZIP() []byte { var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_protobuf_struct_proto_goTypes = []interface{}{ +var file_google_protobuf_struct_proto_goTypes = []any{ (NullValue)(0), // 0: google.protobuf.NullValue (*Struct)(nil), // 1: google.protobuf.Struct (*Value)(nil), // 2: google.protobuf.Value @@ -743,7 +743,7 @@ func file_google_protobuf_struct_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Struct); i { case 0: return &v.state @@ -755,7 +755,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Value); i { case 0: return &v.state @@ -767,7 +767,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListValue); i { case 0: return &v.state @@ -780,7 +780,7 @@ func file_google_protobuf_struct_proto_init() { } } } - file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), (*Value_StringValue)(nil), diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 81511a3363..83a5a645b0 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ +var file_google_protobuf_timestamp_proto_goTypes = []any{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Timestamp); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 762a87130f..e473f826aa 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -605,7 +605,7 @@ func 
file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { } var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_google_protobuf_wrappers_proto_goTypes = []interface{}{ +var file_google_protobuf_wrappers_proto_goTypes = []any{ (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue (*FloatValue)(nil), // 1: google.protobuf.FloatValue (*Int64Value)(nil), // 2: google.protobuf.Int64Value @@ -630,7 +630,7 @@ func file_google_protobuf_wrappers_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*DoubleValue); i { case 0: return &v.state @@ -642,7 +642,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FloatValue); i { case 0: return &v.state @@ -654,7 +654,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Int64Value); i { case 0: return &v.state @@ -666,7 +666,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*UInt64Value); i { case 0: return &v.state @@ -678,7 +678,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Int32Value); i { case 0: return &v.state @@ -690,7 +690,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*UInt32Value); i { case 0: return &v.state @@ -702,7 +702,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*BoolValue); i { case 0: return &v.state @@ -714,7 +714,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*StringValue); i { case 0: return &v.state @@ -726,7 +726,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*BytesValue); i { case 0: return &v.state diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore b/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore new file mode 
100644 index 0000000000..b7ed7f956d --- /dev/null +++ b/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore @@ -0,0 +1,6 @@ +# editor and IDE paraphernalia +.idea +.vscode + +# macOS paraphernalia +.DS_Store diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE b/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE new file mode 100644 index 0000000000..df76d7d771 --- /dev/null +++ b/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/README.md b/vendor/gopkg.in/evanphx/json-patch.v4/README.md new file mode 100644 index 0000000000..28e3516937 --- /dev/null +++ b/vendor/gopkg.in/evanphx/json-patch.v4/README.md @@ -0,0 +1,317 @@ +# JSON-Patch +`jsonpatch` is a library which provides functionality for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). + +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) + +# Get It! + +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch/v5 +``` + +**Stable Versions**: +* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` + +(previous versions below `v3` are unavailable) + +# Use It! +* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) + + +# Configuration + +* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. 
+  This defaults to `true` and enables the non-standard practice of allowing
+  negative indices to mean indices starting at the end of an array. This
+  functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
+  false`.
+
+* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
+  which limits the total size increase in bytes caused by "copy" operations in a
+  patch. It defaults to 0, which means there is no limit.
+
+These global variables control the behavior of `jsonpatch.Apply`.
+
+An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions`, whose behavior
+is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`.
+
+The `jsonpatch.ApplyOptions` structure includes the configuration options above
+and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`.
+
+When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore
+`remove` operations whose `path` points to a non-existent location in the JSON document.
+`AllowMissingPathOnRemove` defaults to `false`, which will lead to `jsonpatch.ApplyWithOptions`
+returning an error when hitting a missing `path` on `remove`.
+
+When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure
+that `add` operations produce all the `path` elements that are missing from the target object.
+
+Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions`
+whose values are populated from the global configuration variables.
+
+## Create and apply a merge patch
+Given both an original JSON document and a modified JSON document, you can create
+a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
+
+It can describe the changes needed to convert from the original to the
+modified JSON document.
+
+Once you have a merge patch, you can apply it to other JSON documents using the
+`jsonpatch.MergePatch(document, patch)` function.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	// Let's create a merge patch from these two documents...
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	target := []byte(`{"name": "Jane", "age": 24}`)
+
+	patch, err := jsonpatch.CreateMergePatch(original, target)
+	if err != nil {
+		panic(err)
+	}
+
+	// Now let's apply the patch against a different JSON document...
+
+	alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
+	modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
+
+	fmt.Printf("patch document: %s\n", patch)
+	fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+patch document: {"height":null,"name":"Jane"}
+updated alternative doc: {"age":28,"name":"Jane"}
+```
+
+## Create and apply a JSON Patch
+You can create patch objects using `DecodePatch([]byte)`, which can then
+be applied against JSON documents.
+
+The following is an example of creating a patch from two operations, and
+applying it against a JSON document.
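(Editor's note, ahead of the example below: the Configuration section above describes an options-based API but the README never shows it in use. The sketch here is a minimal, hedged illustration of that API; it assumes the v5 module, `github.com/evanphx/json-patch/v5`, where `ApplyOptions`, `NewApplyOptions`, and `ApplyWithOptions` are defined. The v4 copy vendored by this change only exposes `Apply` and `ApplyIndent`.)

```go
package main

import (
	"fmt"

	// Assumption: the v5 module, which provides ApplyOptions/ApplyWithOptions.
	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	original := []byte(`{"name": "John", "age": 24}`)

	// This "remove" targets a path that is absent from the document.
	patchJSON := []byte(`[{"op": "remove", "path": "/height"}]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}

	// Start from the global defaults, then tolerate missing paths on remove.
	opts := jsonpatch.NewApplyOptions()
	opts.AllowMissingPathOnRemove = true

	modified, err := patch.ApplyWithOptions(original, opts)
	if err != nil {
		panic(err)
	}

	fmt.Printf("Modified document: %s\n", modified)
}
```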
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	patchJSON := []byte(`[
+		{"op": "replace", "path": "/name", "value": "Jane"},
+		{"op": "remove", "path": "/height"}
+	]`)
+
+	patch, err := jsonpatch.DecodePatch(patchJSON)
+	if err != nil {
+		panic(err)
+	}
+
+	modified, err := patch.Apply(original)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("Original document: %s\n", original)
+	fmt.Printf("Modified document: %s\n", modified)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+Original document: {"name": "John", "age": 24, "height": 3.21}
+Modified document: {"age":24,"name":"Jane"}
+```
+
+## Comparing JSON documents
+Due to potential whitespace and ordering differences, one cannot simply compare
+JSON strings or byte-arrays directly.
+
+As such, you can instead use `jsonpatch.Equal(document1, document2)` to
+determine if two JSON documents are _structurally_ equal. This ignores
+whitespace differences and key-value ordering.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	similar := []byte(`
+		{
+			"age": 24,
+			"height": 3.21,
+			"name": "John"
+		}
+	`)
+	different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
+
+	if jsonpatch.Equal(original, similar) {
+		fmt.Println(`"original" is structurally equal to "similar"`)
+	}
+
+	if !jsonpatch.Equal(original, different) {
+		fmt.Println(`"original" is _not_ structurally equal to "different"`)
+	}
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+"original" is structurally equal to "similar"
+"original" is _not_ structurally equal to "different"
+```
+
+## Combine merge patches
+Given two JSON merge patch documents, it is possible to combine them into a
+single merge patch that describes both sets of changes.
+
+The resulting merge patch can be used such that applying it results in a
+document structurally equivalent to merging each merge patch into the document
+in succession.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+
+	nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
+	ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
+
+	// Let's combine these merge patch documents...
+	combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
+	if err != nil {
+		panic(err)
+	}
+
+	// Apply each patch individually against the original document
+	withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
+	if err != nil {
+		panic(err)
+	}
+
+	withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
+	if err != nil {
+		panic(err)
+	}
+
+	// Apply the combined patch against the original document
+
+	withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
+	if err != nil {
+		panic(err)
+	}
+
+	// Do both result in the same thing? They should!
+	if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
+		fmt.Println("Both JSON documents are structurally the same!")
+	}
+
+	fmt.Printf("combined merge patch: %s", combinedPatch)
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+Both JSON documents are structurally the same!
+combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
+```
+
+# CLI for comparing JSON documents
+You can install the command-line program `json-patch`.
+
+This program can take multiple JSON patch documents as arguments,
+and is fed a JSON document from `stdin`. It will apply the patch(es) against
+the document and output the modified doc.
+
+**patch.1.json**
+```json
+[
+	{"op": "replace", "path": "/name", "value": "Jane"},
+	{"op": "remove", "path": "/height"}
+]
+```
+
+**patch.2.json**
+```json
+[
+	{"op": "add", "path": "/address", "value": "123 Main St"},
+	{"op": "replace", "path": "/age", "value": "21"}
+]
+```
+
+**document.json**
+```json
+{
+	"name": "John",
+	"age": 24,
+	"height": 3.21
+}
+```
+
+You can then run:
+
+```bash
+$ go install github.com/evanphx/json-patch/cmd/json-patch
+$ cat document.json | json-patch -p patch.1.json -p patch.2.json
+{"address":"123 Main St","age":"21","name":"Jane"}
+```
+
+# Help It!
+Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues)
+or [create a PR](https://github.com/evanphx/json-patch/compare).
+
+Before creating a pull request, we'd ask that you make sure tests are passing
+and that you have added new tests when applicable.
+
+Contributors can run tests using:
+
+```bash
+go test -cover ./...
+```
+
+Builds for pull requests are tested automatically
+using [TravisCI](https://travis-ci.org/evanphx/json-patch).
diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/errors.go b/vendor/gopkg.in/evanphx/json-patch.v4/errors.go
new file mode 100644
index 0000000000..75304b4437
--- /dev/null
+++ b/vendor/gopkg.in/evanphx/json-patch.v4/errors.go
@@ -0,0 +1,38 @@
+package jsonpatch
+
+import "fmt"
+
+// AccumulatedCopySizeError is an error type returned when the accumulated size
+// increase caused by copy operations in a patch operation has exceeded the
+// limit.
+type AccumulatedCopySizeError struct {
+	limit       int64
+	accumulated int64
+}
+
+// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
+func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
+	return &AccumulatedCopySizeError{limit: l, accumulated: a}
+}
+
+// Error implements the error interface.
+func (a *AccumulatedCopySizeError) Error() string {
+	return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
+}
+
+// ArraySizeError is an error type returned when the array size has exceeded
+// the limit.
+type ArraySizeError struct {
+	limit int
+	size  int
+}
+
+// NewArraySizeError returns an ArraySizeError.
+func NewArraySizeError(l, s int) *ArraySizeError {
+	return &ArraySizeError{limit: l, size: s}
+}
+
+// Error implements the error interface.
+func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/merge.go b/vendor/gopkg.in/evanphx/json-patch.v4/merge.go new file mode 100644 index 0000000000..ad88d40181 --- /dev/null +++ b/vendor/gopkg.in/evanphx/json-patch.v4/merge.go @@ -0,0 +1,389 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + if !mergeMerge { + pruneNulls(v) + } + + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + } + newAry = append(newAry, v) + } + + *ary = newAry + + return ary +} + +var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. 
+func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, ErrBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, ErrBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, ErrBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, ErrBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, ErrBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, ErrBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, ErrBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, ErrBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, ErrBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, ErrBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, ErrBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + if len(bt) != len(at) { + return false + } + for key := range bt { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go new file mode 100644 index 0000000000..dc2b7e51e6 --- /dev/null +++ b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go @@ -0,0 +1,851 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 = 0 +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. 
+type Patch []Operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + ra := make(json.RawMessage, sz) + copy(ra, a) + return newLazyNode(&ra), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + if len(n.doc) != len(o.doc) { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if (v == nil) != (ov == nil) { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. 
+func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". 
+func (d *partialArray) set(key string, val *lazyNode) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(*d) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(*d) + } + + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(ary) + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx < 0 { + if !SupportNegativeIndices { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(*d) + } + + if idx >= len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(cur) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace 
operation failed to decode path") + } + + if path == "" { + val := op.value() + + if val.which == eRaw { + if !val.tryDoc() { + if !val.tryAry() { + return errors.Wrapf(err, "replace operation value must be object or array") + } + } + } + + switch val.which { + case eAry: + *doc = &val.ary + case eDoc: + *doc = &val.doc + case eRaw: + return errors.Wrapf(err, "replace operation hit impossible case") + } + + return nil + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + } + + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + } + + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } + + if path == "" { + var self lazyNode + + switch sv := (*doc).(type) { + case *partialDoc: + self.doc = *sv + self.which = eDoc + case *partialArray: + self.ary = *sv + self.which = eAry + } + + if self.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in test for path: '%s'", path) + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } else if op.value() == nil { + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in copy for from: '%s'", from) + } + + path, err := op.Path() + if err != nil { + return 
errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return errors.Wrapf(err, "error while performing deep copy") + } + + (*accumulatedCopySize) += int64(sz) + if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + if len(doc) == 0 { + return doc, nil + } + + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/k8s.io/api/admission/v1/doc.go b/vendor/k8s.io/api/admission/v1/doc.go index cbc6bb59dd..e7df9f629c 100644 --- a/vendor/k8s.io/api/admission/v1/doc.go +++ b/vendor/k8s.io/api/admission/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=false - +// +k8s:prerelease-lifecycle-gen=true // +groupName=admission.k8s.io package v1 // import "k8s.io/api/admission/v1" diff --git a/vendor/k8s.io/api/admission/v1/generated.proto b/vendor/k8s.io/api/admission/v1/generated.proto index 941deb4fb4..9648aa58fb 100644 --- a/vendor/k8s.io/api/admission/v1/generated.proto +++ b/vendor/k8s.io/api/admission/v1/generated.proto @@ -38,10 +38,10 @@ message AdmissionRequest { optional string uid = 1; // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; // Resource is the fully-qualified resource being requested (for example, v1.pods) - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; // SubResource is the subresource being requested, if any (for example, "status" or "scale") // +optional @@ -58,7 +58,7 @@ message AdmissionRequest { // // See documentation for the "matchPolicy" field in the webhook configuration type for more details. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. @@ -71,7 +71,7 @@ message AdmissionRequest { // // See documentation for the "matchPolicy" field in the webhook configuration type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. @@ -93,15 +93,15 @@ message AdmissionRequest { optional string operation = 7; // UserInfo is information about the requesting user - optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; + optional .k8s.io.api.authentication.v1.UserInfo userInfo = 8; // Object is the object from the incoming request. // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; // OldObject is the existing object. Only populated for DELETE and UPDATE requests. // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; // DryRun indicates that modifications will definitely not be persisted for this request. // Defaults to false. @@ -114,7 +114,7 @@ message AdmissionRequest { // Operation might be a CREATE, in which case the Options will a // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; } // AdmissionResponse describes an admission response. 
@@ -129,7 +129,7 @@ message AdmissionResponse { // Result contains extra details into why an admission request was denied. // This field IS NOT consulted in any way if "Allowed" is "true". // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. // +optional diff --git a/vendor/k8s.io/api/admission/v1/types.go b/vendor/k8s.io/api/admission/v1/types.go index 556fd1ad54..2def92da5b 100644 --- a/vendor/k8s.io/api/admission/v1/types.go +++ b/vendor/k8s.io/api/admission/v1/types.go @@ -24,6 +24,7 @@ import ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // AdmissionReview describes an admission review request/response. type AdmissionReview struct { diff --git a/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..ac81d993c6 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,28 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *AdmissionReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.proto b/vendor/k8s.io/api/admission/v1beta1/generated.proto index ff0fa46d25..d27c05b727 100644 --- a/vendor/k8s.io/api/admission/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admission/v1beta1/generated.proto @@ -38,10 +38,10 @@ message AdmissionRequest { optional string uid = 1; // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; // Resource is the fully-qualified resource being requested (for example, v1.pods) - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; // SubResource is the subresource being requested, if any (for example, "status" or "scale") // +optional @@ -58,7 +58,7 @@ message AdmissionRequest { // // See documentation for the "matchPolicy" field in the webhook configuration type for more details. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. @@ -71,7 +71,7 @@ message AdmissionRequest { // // See documentation for the "matchPolicy" field in the webhook configuration type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. @@ -93,15 +93,15 @@ message AdmissionRequest { optional string operation = 7; // UserInfo is information about the requesting user - optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; + optional .k8s.io.api.authentication.v1.UserInfo userInfo = 8; // Object is the object from the incoming request. // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; // OldObject is the existing object. Only populated for DELETE and UPDATE requests. // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; // DryRun indicates that modifications will definitely not be persisted for this request. // Defaults to false. @@ -114,7 +114,7 @@ message AdmissionRequest { // Operation might be a CREATE, in which case the Options will a // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; } // AdmissionResponse describes an admission response. @@ -129,7 +129,7 @@ message AdmissionResponse { // Result contains extra details into why an admission request was denied. // This field IS NOT consulted in any way if "Allowed" is "true". // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. // +optional diff --git a/vendor/k8s.io/api/admissionregistration/v1/doc.go b/vendor/k8s.io/api/admissionregistration/v1/doc.go index c3940f090c..ca0086188a 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=admissionregistration.k8s.io // Package v1 is the v1 version of the API. 
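Reviewer note (not part of the vendored patch): the new zz_generated.prerelease-lifecycle.go files in this bump add an APILifecycleIntroduced() method to each tagged type. A minimal sketch of how a consumer could read that metadata, assuming only the vendored k8s.io/api/admission/v1 package; the main wrapper below is illustrative and not part of this change.

// Illustrative only: inspect the lifecycle metadata generated by
// prerelease-lifecycle-gen for the admission/v1 API group.
package main

import (
	"fmt"

	admissionv1 "k8s.io/api/admission/v1"
)

func main() {
	var review admissionv1.AdmissionReview
	// APILifecycleIntroduced is added by this vendor bump (see the
	// zz_generated.prerelease-lifecycle.go hunk above) and reports the
	// Kubernetes release in which the type was introduced.
	major, minor := review.APILifecycleIntroduced()
	fmt.Printf("admission/v1 AdmissionReview introduced in %d.%d\n", major, minor) // 1.19
}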
diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto index 44589007a2..e856e9eaf2 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -156,7 +156,7 @@ message MatchResources { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; // ObjectSelector decides whether to run the validation based on if the // object has matching labels. objectSelector is evaluated against both @@ -170,7 +170,7 @@ message MatchResources { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. // The policy cares about an operation if it matches _any_ Rule. @@ -290,7 +290,7 @@ message MutatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -304,7 +304,7 @@ message MutatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; // SideEffects states whether this webhook has side effects. // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). @@ -374,7 +374,7 @@ message MutatingWebhook { message MutatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional @@ -390,7 +390,7 @@ message MutatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of MutatingWebhookConfiguration. repeated MutatingWebhookConfiguration items = 2; @@ -463,7 +463,7 @@ message ParamRef { // mutually exclusive properties. If one is set, the other must be unset. 
// // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; // `parameterNotFoundAction` controls the behavior of the binding when the resource // exists, and name or selector is valid, but there are no parameters @@ -570,16 +570,11 @@ message TypeChecking { repeated ExpressionWarning expressionWarnings = 1; } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 // ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. message ValidatingAdmissionPolicy { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicy. optional ValidatingAdmissionPolicySpec spec = 2; @@ -606,7 +601,7 @@ message ValidatingAdmissionPolicy { message ValidatingAdmissionPolicyBinding { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. optional ValidatingAdmissionPolicyBindingSpec spec = 2; @@ -617,7 +612,7 @@ message ValidatingAdmissionPolicyBindingList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of PolicyBinding. repeated ValidatingAdmissionPolicyBinding items = 2; @@ -688,14 +683,12 @@ message ValidatingAdmissionPolicyBindingSpec { repeated string validationActions = 4; } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 // ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. message ValidatingAdmissionPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingAdmissionPolicy. repeated ValidatingAdmissionPolicy items = 2; @@ -800,7 +793,7 @@ message ValidatingAdmissionPolicyStatus { // +optional // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; } // ValidatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -891,7 +884,7 @@ message ValidatingWebhook { // // Default to the empty LabelSelector, which matches everything. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -905,7 +898,7 @@ message ValidatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; // SideEffects states whether this webhook has side effects. // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). @@ -957,7 +950,7 @@ message ValidatingWebhook { message ValidatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional @@ -973,7 +966,7 @@ message ValidatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingWebhookConfiguration. repeated ValidatingWebhookConfiguration items = 2; diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go index 0510712b24..4efeb26748 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -137,6 +137,7 @@ const ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.30 + // ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. type ValidatingAdmissionPolicy struct { metav1.TypeMeta `json:",inline"` @@ -195,6 +196,7 @@ type ExpressionWarning struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.30 + // ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. type ValidatingAdmissionPolicyList struct { metav1.TypeMeta `json:",inline"` @@ -203,7 +205,7 @@ type ValidatingAdmissionPolicyList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingAdmissionPolicy. - Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. @@ -464,7 +466,7 @@ type ValidatingAdmissionPolicyBindingList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of PolicyBinding. 
- Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. @@ -713,6 +715,7 @@ type NamedRuleWithOperations struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. type ValidatingWebhookConfiguration struct { @@ -730,6 +733,7 @@ type ValidatingWebhookConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. type ValidatingWebhookConfigurationList struct { @@ -745,6 +749,7 @@ type ValidatingWebhookConfigurationList struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. type MutatingWebhookConfiguration struct { @@ -762,6 +767,7 @@ type MutatingWebhookConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. type MutatingWebhookConfigurationList struct { diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..0862bb1f2d --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *MutatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index db02dd929f..d5974d5ec4 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -156,7 +156,7 @@ message MatchResources { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; // ObjectSelector decides whether to run the validation based on if the // object has matching labels. objectSelector is evaluated against both @@ -170,7 +170,7 @@ message MatchResources { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
// The policy cares about an operation if it matches _any_ Rule. @@ -211,7 +211,7 @@ message NamedRuleWithOperations { repeated string resourceNames = 1; // RuleWithOperations is a tuple of Operations and Resources. - optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; + optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; } // ParamKind is a tuple of Group Kind and Version. @@ -267,7 +267,7 @@ message ParamRef { // mutually exclusive properties. If one is set, the other must be unset. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; // `parameterNotFoundAction` controls the behavior of the binding when the resource // exists, and name or selector is valid, but there are no parameters @@ -295,7 +295,7 @@ message TypeChecking { message ValidatingAdmissionPolicy { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicy. optional ValidatingAdmissionPolicySpec spec = 2; @@ -322,7 +322,7 @@ message ValidatingAdmissionPolicy { message ValidatingAdmissionPolicyBinding { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. optional ValidatingAdmissionPolicyBindingSpec spec = 2; @@ -333,7 +333,7 @@ message ValidatingAdmissionPolicyBindingList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of PolicyBinding. repeated ValidatingAdmissionPolicyBinding items = 2; @@ -409,7 +409,7 @@ message ValidatingAdmissionPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingAdmissionPolicy. repeated ValidatingAdmissionPolicy items = 2; @@ -514,7 +514,7 @@ message ValidatingAdmissionPolicyStatus { // +optional // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; } // Validation specifies the CEL expression which is used to apply the validation. 
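Reviewer note (not part of the vendored patch): the types.go hunks in this bump also drop `omitempty` from the `items` JSON tag on the ValidatingAdmissionPolicy list types (see the admissionregistration/v1 hunk above and the v1alpha1/v1beta1 hunks below). A minimal sketch of the serialization effect, assuming the vendored k8s.io/api/admissionregistration/v1 package; the example itself is not part of the patch.

// Illustrative only: with `omitempty` removed from the Items tag, the
// "items" key is always emitted when a list is marshalled.
package main

import (
	"encoding/json"
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	list := admissionregistrationv1.ValidatingAdmissionPolicyList{
		Items: []admissionregistrationv1.ValidatingAdmissionPolicy{},
	}
	out, err := json.Marshal(&list)
	if err != nil {
		panic(err)
	}
	// Prints {"metadata":{},"items":[]} — before this change an empty
	// list omitted the "items" key entirely.
	fmt.Println(string(out))
}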
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index bd6b17e158..78d918bc72 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -142,7 +142,7 @@ type ValidatingAdmissionPolicyList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingAdmissionPolicy. - Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. @@ -404,7 +404,7 @@ type ValidatingAdmissionPolicyBindingList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of PolicyBinding. - Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 91479acc20..30f99f64d0 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -157,7 +157,7 @@ message MatchResources { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; // ObjectSelector decides whether to run the validation based on if the // object has matching labels. objectSelector is evaluated against both @@ -171,7 +171,7 @@ message MatchResources { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. // The policy cares about an operation if it matches _any_ Rule. @@ -223,7 +223,7 @@ message MutatingWebhook { // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. // +listType=atomic - repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; + repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - // allowed values are Ignore or Fail. Defaults to Ignore. @@ -291,7 +291,7 @@ message MutatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. 
objectSelector is evaluated against both @@ -305,7 +305,7 @@ message MutatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun @@ -379,7 +379,7 @@ message MutatingWebhook { message MutatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional @@ -395,7 +395,7 @@ message MutatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of MutatingWebhookConfiguration. repeated MutatingWebhookConfiguration items = 2; @@ -410,7 +410,7 @@ message NamedRuleWithOperations { repeated string resourceNames = 1; // RuleWithOperations is a tuple of Operations and Resources. - optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; + optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; } // ParamKind is a tuple of Group Kind and Version. @@ -468,7 +468,7 @@ message ParamRef { // mutually exclusive properties. If one is set, the other must be unset. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; // `parameterNotFoundAction` controls the behavior of the binding when the resource // exists, and name or selector is valid, but there are no parameters @@ -523,7 +523,7 @@ message TypeChecking { message ValidatingAdmissionPolicy { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicy. optional ValidatingAdmissionPolicySpec spec = 2; @@ -550,7 +550,7 @@ message ValidatingAdmissionPolicy { message ValidatingAdmissionPolicyBinding { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. optional ValidatingAdmissionPolicyBindingSpec spec = 2; @@ -561,7 +561,7 @@ message ValidatingAdmissionPolicyBindingList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of PolicyBinding. repeated ValidatingAdmissionPolicyBinding items = 2; @@ -639,7 +639,7 @@ message ValidatingAdmissionPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingAdmissionPolicy. repeated ValidatingAdmissionPolicy items = 2; @@ -744,7 +744,7 @@ message ValidatingAdmissionPolicyStatus { // +optional // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; } // ValidatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -767,7 +767,7 @@ message ValidatingWebhook { // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. // +listType=atomic - repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; + repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - // allowed values are Ignore or Fail. Defaults to Ignore. @@ -835,7 +835,7 @@ message ValidatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -849,7 +849,7 @@ message ValidatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun @@ -906,7 +906,7 @@ message ValidatingWebhook { message ValidatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional @@ -922,7 +922,7 @@ message ValidatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingWebhookConfiguration. 
repeated ValidatingWebhookConfiguration items = 2; diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index cf1e29a6ca..0f59031239 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -158,7 +158,7 @@ type ValidatingAdmissionPolicyList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingAdmissionPolicy. - Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. @@ -419,7 +419,7 @@ type ValidatingAdmissionPolicyBindingList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of PolicyBinding. - Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. diff --git a/vendor/k8s.io/api/apidiscovery/v2/doc.go b/vendor/k8s.io/api/apidiscovery/v2/doc.go index d47aa85976..4f3ad5f139 100644 --- a/vendor/k8s.io/api/apidiscovery/v2/doc.go +++ b/vendor/k8s.io/api/apidiscovery/v2/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=apidiscovery.k8s.io package v2 // import "k8s.io/api/apidiscovery/v2" diff --git a/vendor/k8s.io/api/apidiscovery/v2/generated.proto b/vendor/k8s.io/api/apidiscovery/v2/generated.proto index fa56318a6d..62f2d7f2c1 100644 --- a/vendor/k8s.io/api/apidiscovery/v2/generated.proto +++ b/vendor/k8s.io/api/apidiscovery/v2/generated.proto @@ -38,7 +38,7 @@ message APIGroupDiscovery { // name is allowed to be "" to represent the legacy, ungroupified resources. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // versions are the versions supported in this group. They are sorted in descending order of preference, // with the preferred version being the first entry. @@ -55,7 +55,7 @@ message APIGroupDiscoveryList { // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of groups for discovery. The groups are listed in priority order. repeated APIGroupDiscovery items = 2; @@ -72,7 +72,7 @@ message APIResourceDiscovery { // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. 
// This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // scope indicates the scope of a resource, either Cluster or Namespaced optional string scope = 3; @@ -112,7 +112,7 @@ message APISubresourceDiscovery { // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. // Some subresources do not return normal resources, these will have null or empty return types. - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // acceptedTypes describes the kinds that this endpoint accepts. // Subresources may accept the standard content types or define @@ -122,7 +122,7 @@ message APISubresourceDiscovery { // +listMapKey=group // +listMapKey=version // +listMapKey=kind - repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; // verbs is a list of supported API operation types (this includes // but is not limited to get, list, watch, create, update, patch, diff --git a/vendor/k8s.io/api/apidiscovery/v2/types.go b/vendor/k8s.io/api/apidiscovery/v2/types.go index f0e31bcde5..449679b61d 100644 --- a/vendor/k8s.io/api/apidiscovery/v2/types.go +++ b/vendor/k8s.io/api/apidiscovery/v2/types.go @@ -21,6 +21,7 @@ import ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 // APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery. // This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated @@ -37,6 +38,7 @@ type APIGroupDiscoveryList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 // APIGroupDiscovery holds information about which resources are being served for all version of the API Group. // It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version. diff --git a/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..b7132c647d --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. 
+ +package v2 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscovery) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscoveryList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto index a09af750ba..e9ae88072a 100644 --- a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto @@ -38,7 +38,7 @@ message APIGroupDiscovery { // name is allowed to be "" to represent the legacy, ungroupified resources. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // versions are the versions supported in this group. They are sorted in descending order of preference, // with the preferred version being the first entry. @@ -55,7 +55,7 @@ message APIGroupDiscoveryList { // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of groups for discovery. The groups are listed in priority order. repeated APIGroupDiscovery items = 2; @@ -72,7 +72,7 @@ message APIResourceDiscovery { // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // scope indicates the scope of a resource, either Cluster or Namespaced optional string scope = 3; @@ -112,7 +112,7 @@ message APISubresourceDiscovery { // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. // Some subresources do not return normal resources, these will have null or empty return types. - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // acceptedTypes describes the kinds that this endpoint accepts. 
// Subresources may accept the standard content types or define @@ -122,7 +122,7 @@ message APISubresourceDiscovery { // +listMapKey=group // +listMapKey=version // +listMapKey=kind - repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; // verbs is a list of supported API operation types (this includes // but is not limited to get, list, watch, create, update, patch, diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto index ef44290480..8a77860720 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto @@ -52,7 +52,7 @@ message ServerStorageVersion { // Storage version of a specific resource. message StorageVersion { // The name is .. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is an empty spec. It is here to comply with Kubernetes API style. optional StorageVersionSpec spec = 2; @@ -77,7 +77,7 @@ message StorageVersionCondition { optional int64 observedGeneration = 3; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // The reason for the condition's last transition. // +required @@ -93,7 +93,7 @@ message StorageVersionList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items holds a list of StorageVersion repeated StorageVersion items = 2; diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go index 61dc97bde5..d189e860f2 100644 --- a/vendor/k8s.io/api/apps/v1/doc.go +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -17,5 +17,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 9001416861..d864f2eebf 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -43,10 +43,10 @@ message ControllerRevision { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; // Revision indicates the revision of the state represented by Data. 
optional int64 revision = 3; @@ -56,7 +56,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -67,7 +67,7 @@ message DaemonSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -93,7 +93,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -109,7 +109,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. repeated DaemonSet items = 2; @@ -121,7 +121,7 @@ message DaemonSetSpec { // Must match in order to be controlled. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node @@ -129,7 +129,7 @@ message DaemonSetSpec { // selector is specified). // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -225,7 +225,7 @@ message Deployment { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -245,10 +245,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. 
- optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -261,7 +261,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -277,11 +277,11 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // It must match the pod template's labels. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -376,7 +376,7 @@ message ReplicaSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -402,7 +402,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -418,7 +418,7 @@ message ReplicaSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -444,13 +444,13 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. 
@@ -501,7 +501,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. @@ -522,7 +522,7 @@ message RollingUpdateDaemonSet { // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -538,7 +538,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -552,7 +552,7 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -572,7 +572,7 @@ message RollingUpdateStatefulSetStrategy { // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it // will be counted towards MaxUnavailable. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // StatefulSet represents a set of pods with consistent identities. @@ -586,7 +586,7 @@ message StatefulSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -608,7 +608,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -624,7 +624,7 @@ message StatefulSetList { // Standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of stateful sets. repeated StatefulSet items = 2; @@ -675,7 +675,7 @@ message StatefulSetSpec { // selector is a label query over pods that should match the replica count. // It must match the pod template's labels. 
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet @@ -684,7 +684,7 @@ message StatefulSetSpec { // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -695,7 +695,7 @@ message StatefulSetSpec { // TODO: Define the behavior if a claim already exists with the same name. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -738,14 +738,13 @@ message StatefulSetSpec { // policy allows the lifecycle to be altered, for example by deleting persistent // volume claims when their stateful set is deleted, or when their pod is scaled // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, - // which is alpha. +optional + // which is beta. + // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 96ff620986..e942cd526e 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -37,6 +37,7 @@ const ( // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: @@ -255,14 +256,13 @@ type StatefulSetSpec struct { // policy allows the lifecycle to be altered, for example by deleting persistent // volume claims when their stateful set is deleted, or when their pod is scaled // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, - // which is alpha. +optional + // which is beta. 
+ // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -335,6 +335,7 @@ type StatefulSetCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // StatefulSetList is a collection of StatefulSets. type StatefulSetList struct { @@ -353,6 +354,7 @@ type StatefulSetList struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { @@ -555,6 +557,7 @@ type DeploymentCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DeploymentList is a list of Deployments. type DeploymentList struct { @@ -747,6 +750,7 @@ type DaemonSetCondition struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DaemonSet represents the configuration of a daemon set. type DaemonSet struct { @@ -778,6 +782,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DaemonSetList is a collection of daemon sets. type DaemonSetList struct { @@ -796,6 +801,7 @@ type DaemonSetList struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ReplicaSet ensures that a specified number of pod replicas are running at any given time. type ReplicaSet struct { @@ -823,6 +829,7 @@ type ReplicaSet struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ReplicaSetList is a collection of ReplicaSets. type ReplicaSetList struct { @@ -925,6 +932,7 @@ type ReplicaSetCondition struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ControllerRevision implements an immutable snapshot of state data. 
Clients // are responsible for serializing and deserializing the objects that contain @@ -950,6 +958,7 @@ type ControllerRevision struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ControllerRevisionList is a resource containing a list of ControllerRevision objects. type ControllerRevisionList struct { diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 6676da0640..f3e221a0e9 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -354,8 +354,8 @@ var map_StatefulSetSpec = map[string]string{ "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. +optional", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", + "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is beta.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..34a036b625 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,82 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ControllerRevision) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ControllerRevisionList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DaemonSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DaemonSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Deployment) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicaSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicaSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StatefulSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StatefulSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 8965622257..4b0fa366cf 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -45,10 +45,10 @@ message ControllerRevision { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; // revision indicates the revision of the state represented by Data. optional int64 revision = 3; @@ -58,7 +58,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -70,7 +70,7 @@ message ControllerRevisionList { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -90,10 +90,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -106,7 +106,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -136,11 +136,11 @@ message DeploymentSpec { // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. // The only allowed template.spec.restartPolicy value is "Always". 
- optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -254,7 +254,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -268,7 +268,7 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -287,14 +287,14 @@ message RollingUpdateStatefulSetStrategy { // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it // will be counted towards MaxUnavailable. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional @@ -342,7 +342,7 @@ message ScaleStatus { // map to the same storage identity. message StatefulSet { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -364,7 +364,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -378,7 +378,7 @@ message StatefulSetCondition { // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated StatefulSet items = 2; } @@ -429,7 +429,7 @@ message StatefulSetSpec { // If empty, defaulted to labels on the pod template. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. 
Each pod stamped out by the StatefulSet @@ -437,7 +437,7 @@ message StatefulSetSpec { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -448,7 +448,7 @@ message StatefulSetSpec { // TODO: Define the behavior if a claim already exists with the same name. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -493,9 +493,7 @@ message StatefulSetSpec { // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index bdf9f93a9b..07bfa88c5f 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -297,9 +297,7 @@ type StatefulSetSpec struct { // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index a62e9869d6..9e7fb1adc2 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -259,7 +259,7 @@ var map_StatefulSetSpec = map[string]string{ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. 
The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index 3ae8a80094..d3db8956e8 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -45,10 +45,10 @@ message ControllerRevision { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; // Revision indicates the revision of the state represented by Data. optional int64 revision = 3; @@ -58,7 +58,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -71,7 +71,7 @@ message DaemonSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -97,7 +97,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -113,7 +113,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. repeated DaemonSet items = 2; @@ -125,7 +125,7 @@ message DaemonSetSpec { // Must match in order to be controlled. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. 
// The DaemonSet will create exactly one copy of this pod on every node @@ -133,7 +133,7 @@ message DaemonSetSpec { // selector is specified). // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -230,7 +230,7 @@ message DaemonSetUpdateStrategy { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -250,10 +250,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -266,7 +266,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -282,11 +282,11 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // It must match the pod template's labels. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -382,7 +382,7 @@ message ReplicaSet { // be the same as the Pod(s) that the ReplicaSet manages. // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -408,7 +408,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -424,7 +424,7 @@ message ReplicaSetList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -450,13 +450,13 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. @@ -507,7 +507,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. @@ -528,7 +528,7 @@ message RollingUpdateDaemonSet { // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -544,7 +544,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -558,7 +558,7 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -578,14 +578,14 @@ message RollingUpdateStatefulSetStrategy { // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it // will be counted towards MaxUnavailable. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // Scale represents a scaling request for a resource. 
message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional @@ -634,7 +634,7 @@ message ScaleStatus { // map to the same storage identity. message StatefulSet { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -656,7 +656,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -670,7 +670,7 @@ message StatefulSetCondition { // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated StatefulSet items = 2; } @@ -720,7 +720,7 @@ message StatefulSetSpec { // selector is a label query over pods that should match the replica count. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet @@ -729,7 +729,7 @@ message StatefulSetSpec { // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -740,7 +740,7 @@ message StatefulSetSpec { // TODO: Define the behavior if a claim already exists with the same name. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -785,9 +785,7 @@ message StatefulSetSpec { // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. 
// +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index 6981c2a175..f93a5bea7e 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -307,9 +307,7 @@ type StatefulSetSpec struct { // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index d7e9209915..0b8fe34af1 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -383,7 +383,7 @@ var map_StatefulSetSpec = map[string]string{ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index 1614265bdf..3bdc89badc 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -18,5 +18,6 @@ limitations under the License. // +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index 1fe2f4f2ce..ae9763576c 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -63,7 +63,7 @@ message SelfSubjectReview { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Status is filled in by the server with the user attributes. optional SelfSubjectReviewStatus status = 2; @@ -81,7 +81,7 @@ message TokenRequest { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenRequestSpec spec = 2; @@ -123,7 +123,7 @@ message TokenRequestStatus { optional string token = 1; // ExpirationTimestamp is the time of expiration of the returned token. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; } // TokenReview attempts to authenticate a token to a known user. @@ -133,7 +133,7 @@ message TokenReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenReviewSpec spec = 2; diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index 4f4400e305..2dc0707c4f 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -45,6 +45,7 @@ const ( // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // TokenReview attempts to authenticate a token to a known user. // Note: TokenReview requests may be cached by the webhook token authenticator @@ -134,6 +135,7 @@ func (t ExtraValue) String() string { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.10 // TokenRequest requests a token for a given service account. type TokenRequest struct { @@ -206,6 +208,7 @@ type BoundObjectReference struct { // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 // SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. // When using impersonation, users will receive the user info of the user being impersonated. If impersonation or diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..b612bdec48 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *TokenRequest) APILifecycleIntroduced() (major, minor int) { + return 1, 10 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *TokenReview) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} diff --git a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto index 51d9252440..4585e5cdd3 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto @@ -36,7 +36,7 @@ message SelfSubjectReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Status is filled in by the server with the user attributes. optional SelfSubjectReviewStatus status = 2; @@ -46,6 +46,6 @@ message SelfSubjectReview { message SelfSubjectReviewStatus { // User attributes of the user making this request. // +optional - optional k8s.io.api.authentication.v1.UserInfo userInfo = 1; + optional .k8s.io.api.authentication.v1.UserInfo userInfo = 1; } diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index 61658245d4..d0f6fe4402 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -45,7 +45,7 @@ message SelfSubjectReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Status is filled in by the server with the user attributes. optional SelfSubjectReviewStatus status = 2; @@ -55,7 +55,7 @@ message SelfSubjectReview { message SelfSubjectReviewStatus { // User attributes of the user making this request. 
// +optional - optional k8s.io.api.authentication.v1.UserInfo userInfo = 1; + optional .k8s.io.api.authentication.v1.UserInfo userInfo = 1; } // TokenReview attempts to authenticate a token to a known user. @@ -65,7 +65,7 @@ message TokenReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenReviewSpec spec = 2; diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index cf100e6b75..77e5a19c4c 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=authorization.k8s.io package v1 // import "k8s.io/api/authorization/v1" diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index dfa109b424..aed9a3a476 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -26,6 +26,7 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" @@ -72,10 +73,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *FieldSelectorAttributes) Reset() { *m = FieldSelectorAttributes{} } +func (*FieldSelectorAttributes) ProtoMessage() {} +func (*FieldSelectorAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_aafd0e5e70cec678, []int{1} +} +func (m *FieldSelectorAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldSelectorAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldSelectorAttributes.Merge(m, src) +} +func (m *FieldSelectorAttributes) XXX_Size() int { + return m.Size() +} +func (m *FieldSelectorAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_FieldSelectorAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldSelectorAttributes proto.InternalMessageInfo + +func (m *LabelSelectorAttributes) Reset() { *m = LabelSelectorAttributes{} } +func (*LabelSelectorAttributes) ProtoMessage() {} +func (*LabelSelectorAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_aafd0e5e70cec678, []int{2} +} +func (m *LabelSelectorAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelectorAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelectorAttributes.Merge(m, src) +} +func (m *LabelSelectorAttributes) XXX_Size() int { + return m.Size() +} +func (m *LabelSelectorAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelectorAttributes.DiscardUnknown(m) +} + 
+var xxx_messageInfo_LabelSelectorAttributes proto.InternalMessageInfo + func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{1} + return fileDescriptor_aafd0e5e70cec678, []int{3} } func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +160,7 @@ var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } func (*NonResourceAttributes) ProtoMessage() {} func (*NonResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{2} + return fileDescriptor_aafd0e5e70cec678, []int{4} } func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +188,7 @@ var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } func (*NonResourceRule) ProtoMessage() {} func (*NonResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{3} + return fileDescriptor_aafd0e5e70cec678, []int{5} } func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +216,7 @@ var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } func (*ResourceAttributes) ProtoMessage() {} func (*ResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{4} + return fileDescriptor_aafd0e5e70cec678, []int{6} } func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +244,7 @@ var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo func (m *ResourceRule) Reset() { *m = ResourceRule{} } func (*ResourceRule) ProtoMessage() {} func (*ResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{5} + return fileDescriptor_aafd0e5e70cec678, []int{7} } func (m *ResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +272,7 @@ var xxx_messageInfo_ResourceRule proto.InternalMessageInfo func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } func (*SelfSubjectAccessReview) ProtoMessage() {} func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{6} + return fileDescriptor_aafd0e5e70cec678, []int{8} } func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +300,7 @@ var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{7} + return fileDescriptor_aafd0e5e70cec678, []int{9} } func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +328,7 @@ var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } func (*SelfSubjectRulesReview) ProtoMessage() {} func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{8} + return fileDescriptor_aafd0e5e70cec678, []int{10} } func (m 
*SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +356,7 @@ var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{9} + return fileDescriptor_aafd0e5e70cec678, []int{11} } func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +384,7 @@ var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } func (*SubjectAccessReview) ProtoMessage() {} func (*SubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{10} + return fileDescriptor_aafd0e5e70cec678, []int{12} } func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +412,7 @@ var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{11} + return fileDescriptor_aafd0e5e70cec678, []int{13} } func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -383,7 +440,7 @@ var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{12} + return fileDescriptor_aafd0e5e70cec678, []int{14} } func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -411,7 +468,7 @@ var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{13} + return fileDescriptor_aafd0e5e70cec678, []int{15} } func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,6 +495,8 @@ var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") + proto.RegisterType((*FieldSelectorAttributes)(nil), "k8s.io.api.authorization.v1.FieldSelectorAttributes") + proto.RegisterType((*LabelSelectorAttributes)(nil), "k8s.io.api.authorization.v1.LabelSelectorAttributes") proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.LocalSubjectAccessReview") proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1.NonResourceAttributes") proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1.NonResourceRule") @@ -459,78 +518,85 @@ func init() { } var fileDescriptor_aafd0e5e70cec678 = []byte{ - // 1126 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xfa, 0x4f, 0x6a, 0x3f, 0x37, 0x24, 0x9d, 0x28, 0xcd, 0x36, 0x11, 0x76, 0xb4, 0x48, - 0x90, 0xaa, 0x65, 0x97, 0x58, 0x6d, 0x13, 0x55, 0xaa, 
0x90, 0xad, 0x46, 0x28, 0x52, 0x5b, 0xaa, - 0x89, 0x12, 0x89, 0x22, 0x10, 0xe3, 0xf5, 0xc4, 0x5e, 0x62, 0xef, 0x2e, 0x3b, 0xbb, 0x0e, 0xe1, - 0x54, 0x89, 0x2f, 0xc0, 0x91, 0x03, 0x07, 0xbe, 0x01, 0x17, 0x24, 0x6e, 0x1c, 0x38, 0xa0, 0x1c, - 0x7b, 0x2c, 0x12, 0xb2, 0xc8, 0x72, 0xe6, 0x3b, 0xa0, 0x99, 0x1d, 0x7b, 0xd7, 0xc9, 0xda, 0x8d, - 0x39, 0xd0, 0x4b, 0x6f, 0xde, 0xf7, 0xfb, 0xbd, 0x37, 0x6f, 0xde, 0xbf, 0x79, 0x86, 0x5b, 0x47, - 0xdb, 0x4c, 0xb7, 0x1c, 0x83, 0xb8, 0x96, 0x41, 0x02, 0xbf, 0xe3, 0x78, 0xd6, 0x37, 0xc4, 0xb7, - 0x1c, 0xdb, 0xe8, 0x6f, 0x1a, 0x6d, 0x6a, 0x53, 0x8f, 0xf8, 0xb4, 0xa5, 0xbb, 0x9e, 0xe3, 0x3b, - 0x68, 0x2d, 0x22, 0xeb, 0xc4, 0xb5, 0xf4, 0x31, 0xb2, 0xde, 0xdf, 0x5c, 0x7d, 0xbf, 0x6d, 0xf9, - 0x9d, 0xa0, 0xa9, 0x9b, 0x4e, 0xcf, 0x68, 0x3b, 0x6d, 0xc7, 0x10, 0x3a, 0xcd, 0xe0, 0x50, 0x7c, - 0x89, 0x0f, 0xf1, 0x2b, 0xb2, 0xb5, 0x7a, 0x27, 0x3e, 0xb8, 0x47, 0xcc, 0x8e, 0x65, 0x53, 0xef, - 0xc4, 0x70, 0x8f, 0xda, 0x5c, 0xc0, 0x8c, 0x1e, 0xf5, 0x49, 0x8a, 0x07, 0xab, 0xc6, 0x24, 0x2d, - 0x2f, 0xb0, 0x7d, 0xab, 0x47, 0x2f, 0x28, 0xdc, 0x7b, 0x95, 0x02, 0x33, 0x3b, 0xb4, 0x47, 0xce, - 0xeb, 0x69, 0x5b, 0x00, 0x3b, 0x5f, 0xfb, 0x1e, 0x39, 0x20, 0xdd, 0x80, 0xa2, 0x2a, 0x14, 0x2c, - 0x9f, 0xf6, 0x98, 0xaa, 0xac, 0xe7, 0x36, 0x4a, 0x8d, 0x52, 0x38, 0xa8, 0x16, 0x76, 0xb9, 0x00, - 0x47, 0xf2, 0xfb, 0xc5, 0xef, 0x7f, 0xac, 0x66, 0x9e, 0xff, 0xb9, 0x9e, 0xd1, 0x7e, 0xce, 0x82, - 0xfa, 0xc8, 0x31, 0x49, 0x77, 0x2f, 0x68, 0x7e, 0x49, 0x4d, 0xbf, 0x6e, 0x9a, 0x94, 0x31, 0x4c, - 0xfb, 0x16, 0x3d, 0x46, 0x5f, 0x40, 0x91, 0xdf, 0xac, 0x45, 0x7c, 0xa2, 0x2a, 0xeb, 0xca, 0x46, - 0xb9, 0xf6, 0x81, 0x1e, 0xc7, 0x74, 0xe4, 0xa0, 0xee, 0x1e, 0xb5, 0xb9, 0x80, 0xe9, 0x9c, 0xad, - 0xf7, 0x37, 0xf5, 0x8f, 0x85, 0xad, 0xc7, 0xd4, 0x27, 0x0d, 0x74, 0x3a, 0xa8, 0x66, 0xc2, 0x41, - 0x15, 0x62, 0x19, 0x1e, 0x59, 0x45, 0x07, 0x90, 0x67, 0x2e, 0x35, 0xd5, 0xac, 0xb0, 0x7e, 0x47, - 0x9f, 0x92, 0x31, 0x3d, 0xc5, 0xc3, 0x3d, 0x97, 0x9a, 0x8d, 0xab, 0xf2, 0x84, 0x3c, 0xff, 0xc2, - 0xc2, 0x1e, 0xfa, 0x1c, 0xe6, 0x98, 0x4f, 0xfc, 0x80, 0xa9, 0x39, 0x61, 0xf9, 0xde, 0xcc, 0x96, - 0x85, 0x76, 0xe3, 0x2d, 0x69, 0x7b, 0x2e, 0xfa, 0xc6, 0xd2, 0xaa, 0xf6, 0x29, 0x2c, 0x3f, 0x71, - 0x6c, 0x4c, 0x99, 0x13, 0x78, 0x26, 0xad, 0xfb, 0xbe, 0x67, 0x35, 0x03, 0x9f, 0x32, 0xb4, 0x0e, - 0x79, 0x97, 0xf8, 0x1d, 0x11, 0xae, 0x52, 0xec, 0xda, 0x53, 0xe2, 0x77, 0xb0, 0x40, 0x38, 0xa3, - 0x4f, 0xbd, 0xa6, 0xb8, 0x72, 0x82, 0x71, 0x40, 0xbd, 0x26, 0x16, 0x88, 0xf6, 0x15, 0x2c, 0x24, - 0x8c, 0xe3, 0xa0, 0x2b, 0x32, 0xca, 0xa1, 0xb1, 0x8c, 0x72, 0x0d, 0x86, 0x23, 0x39, 0x7a, 0x00, - 0x0b, 0x76, 0xac, 0xb3, 0x8f, 0x1f, 0x31, 0x35, 0x2b, 0xa8, 0x4b, 0xe1, 0xa0, 0x9a, 0x34, 0xc7, - 0x21, 0x7c, 0x9e, 0xab, 0xfd, 0x9a, 0x05, 0x94, 0x72, 0x1b, 0x03, 0x4a, 0x36, 0xe9, 0x51, 0xe6, - 0x12, 0x93, 0xca, 0x2b, 0x5d, 0x93, 0x0e, 0x97, 0x9e, 0x0c, 0x01, 0x1c, 0x73, 0x5e, 0x7d, 0x39, - 0xf4, 0x0e, 0x14, 0xda, 0x9e, 0x13, 0xb8, 0x22, 0x31, 0xa5, 0xc6, 0xbc, 0xa4, 0x14, 0x3e, 0xe2, - 0x42, 0x1c, 0x61, 0xe8, 0x26, 0x5c, 0xe9, 0x53, 0x8f, 0x59, 0x8e, 0xad, 0xe6, 0x05, 0x6d, 0x41, - 0xd2, 0xae, 0x1c, 0x44, 0x62, 0x3c, 0xc4, 0xd1, 0x6d, 0x28, 0x7a, 0xd2, 0x71, 0xb5, 0x20, 0xb8, - 0x8b, 0x92, 0x5b, 0x1c, 0x45, 0x70, 0xc4, 0x40, 0x77, 0xa1, 0xcc, 0x82, 0xe6, 0x48, 0x61, 0x4e, - 0x28, 0x2c, 0x49, 0x85, 0xf2, 0x5e, 0x0c, 0xe1, 0x24, 0x8f, 0x5f, 0x8b, 0xdf, 0x51, 0xbd, 0x32, - 0x7e, 0x2d, 0x1e, 0x02, 0x2c, 0x10, 0xed, 0x37, 0x05, 0xae, 0xce, 0x96, 0xb1, 0x5b, 0x50, 0x22, - 0xae, 0x25, 0xae, 0x3d, 0xcc, 0xd5, 0x3c, 0x8f, 0x6b, 0xfd, 0xe9, 0x6e, 0x24, 
0xc4, 0x31, 0xce, - 0xc9, 0x43, 0x67, 0x78, 0x49, 0x8f, 0xc8, 0xc3, 0x23, 0x19, 0x8e, 0x71, 0xb4, 0x05, 0xf3, 0xc3, - 0x0f, 0x91, 0x24, 0x35, 0x2f, 0x14, 0xae, 0x85, 0x83, 0xea, 0x3c, 0x4e, 0x02, 0x78, 0x9c, 0xa7, - 0xfd, 0x92, 0x85, 0x95, 0x3d, 0xda, 0x3d, 0x7c, 0x3d, 0xb3, 0xe0, 0xd9, 0xd8, 0x2c, 0xd8, 0x9e, - 0xde, 0xb1, 0xe9, 0x5e, 0xbe, 0xb6, 0x79, 0xf0, 0x43, 0x16, 0xd6, 0xa6, 0xf8, 0x84, 0x8e, 0x01, - 0x79, 0x17, 0xda, 0x4b, 0xc6, 0xd1, 0x98, 0xea, 0xcb, 0xc5, 0xae, 0x6c, 0x5c, 0x0f, 0x07, 0xd5, - 0x94, 0x6e, 0xc5, 0x29, 0x47, 0xa0, 0x6f, 0x15, 0x58, 0xb6, 0xd3, 0x26, 0x95, 0x0c, 0x73, 0x6d, - 0xea, 0xe1, 0xa9, 0x33, 0xae, 0x71, 0x23, 0x1c, 0x54, 0xd3, 0xc7, 0x1f, 0x4e, 0x3f, 0x8b, 0xbf, - 0x32, 0xd7, 0x13, 0xe1, 0xe1, 0x0d, 0xf2, 0xff, 0xd5, 0xd5, 0x27, 0x63, 0x75, 0xb5, 0x75, 0xd9, - 0xba, 0x4a, 0x38, 0x39, 0xb1, 0xac, 0x3e, 0x3b, 0x57, 0x56, 0x77, 0x2f, 0x53, 0x56, 0x49, 0xc3, - 0xd3, 0xab, 0xea, 0x31, 0xac, 0x4e, 0x76, 0x68, 0xe6, 0xe1, 0xac, 0xfd, 0x94, 0x85, 0xa5, 0x37, - 0xcf, 0xfc, 0x2c, 0x6d, 0xfd, 0x7b, 0x1e, 0x56, 0xde, 0xb4, 0xf4, 0xa4, 0x45, 0x27, 0x60, 0xd4, - 0x93, 0xcf, 0xf8, 0x28, 0x39, 0xfb, 0x8c, 0x7a, 0x58, 0x20, 0x48, 0x83, 0xb9, 0x76, 0xf4, 0xba, - 0x45, 0xef, 0x0f, 0xf0, 0x00, 0xcb, 0xa7, 0x4d, 0x22, 0xa8, 0x05, 0x05, 0xca, 0xf7, 0x56, 0xb5, - 0xb0, 0x9e, 0xdb, 0x28, 0xd7, 0x3e, 0xfc, 0x2f, 0x95, 0xa1, 0x8b, 0xcd, 0x77, 0xc7, 0xf6, 0xbd, - 0x93, 0x78, 0x9d, 0x10, 0x32, 0x1c, 0x19, 0x47, 0x6f, 0x43, 0x2e, 0xb0, 0x5a, 0xf2, 0xb5, 0x2f, - 0x4b, 0x4a, 0x6e, 0x7f, 0xf7, 0x21, 0xe6, 0xf2, 0x55, 0x22, 0x97, 0x67, 0x61, 0x02, 0x2d, 0x42, - 0xee, 0x88, 0x9e, 0x44, 0x0d, 0x85, 0xf9, 0x4f, 0xf4, 0x00, 0x0a, 0x7d, 0xbe, 0x57, 0xcb, 0xf8, - 0xbe, 0x37, 0xd5, 0xc9, 0x78, 0x0d, 0xc7, 0x91, 0xd6, 0xfd, 0xec, 0xb6, 0xa2, 0xfd, 0xa1, 0xc0, - 0x8d, 0x89, 0xe5, 0xc7, 0xd7, 0x1d, 0xd2, 0xed, 0x3a, 0xc7, 0xb4, 0x25, 0x8e, 0x2d, 0xc6, 0xeb, - 0x4e, 0x3d, 0x12, 0xe3, 0x21, 0x8e, 0xde, 0x85, 0xb9, 0x16, 0xb5, 0x2d, 0xda, 0x12, 0x8b, 0x51, - 0x31, 0xae, 0xdc, 0x87, 0x42, 0x8a, 0x25, 0xca, 0x79, 0x1e, 0x25, 0xcc, 0xb1, 0xe5, 0x2a, 0x36, - 0xe2, 0x61, 0x21, 0xc5, 0x12, 0x45, 0x75, 0x58, 0xa0, 0xdc, 0x4d, 0xe1, 0xff, 0x8e, 0xe7, 0x39, - 0xc3, 0x8c, 0xae, 0x48, 0x85, 0x85, 0x9d, 0x71, 0x18, 0x9f, 0xe7, 0x6b, 0xff, 0x64, 0x41, 0x9d, - 0x34, 0xda, 0xd0, 0x61, 0xbc, 0x8b, 0x08, 0x50, 0xac, 0x43, 0xe5, 0xda, 0xcd, 0x4b, 0x35, 0x08, - 0xd7, 0x68, 0x2c, 0x4b, 0x47, 0xe6, 0x93, 0xd2, 0xc4, 0xea, 0x22, 0x3e, 0x91, 0x07, 0x8b, 0xf6, - 0xf8, 0xce, 0x1c, 0x2d, 0x55, 0xe5, 0xda, 0xed, 0xcb, 0xb6, 0x83, 0x38, 0x4d, 0x95, 0xa7, 0x2d, - 0x9e, 0x03, 0x18, 0xbe, 0x60, 0x1f, 0xd5, 0x00, 0x2c, 0xdb, 0x74, 0x7a, 0x6e, 0x97, 0xfa, 0x54, - 0x84, 0xad, 0x18, 0xcf, 0xc1, 0xdd, 0x11, 0x82, 0x13, 0xac, 0xb4, 0x78, 0xe7, 0x67, 0x8b, 0x77, - 0xa3, 0x7e, 0x7a, 0x56, 0xc9, 0xbc, 0x38, 0xab, 0x64, 0x5e, 0x9e, 0x55, 0x32, 0xcf, 0xc3, 0x8a, - 0x72, 0x1a, 0x56, 0x94, 0x17, 0x61, 0x45, 0x79, 0x19, 0x56, 0x94, 0xbf, 0xc2, 0x8a, 0xf2, 0xdd, - 0xdf, 0x95, 0xcc, 0xb3, 0xb5, 0x29, 0xff, 0x94, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x45, 0x6f, - 0xe0, 0x61, 0x47, 0x0f, 0x00, 0x00, + // 1247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xf7, 0xfa, 0x47, 0x62, 0x8f, 0xe3, 0x6f, 0xd2, 0xc9, 0x37, 0xcd, 0x36, 0x11, 0x76, 0x64, + 0x24, 0x48, 0xd5, 0xb2, 0x26, 0x51, 0xdb, 0x44, 0x95, 0x0a, 0xf2, 0xaa, 0x01, 0x45, 0x4a, 0x4b, + 0x35, 0x51, 0x22, 0x51, 0x04, 0x62, 0xbc, 0x9e, 0xd8, 0x4b, 0xec, 0xdd, 0xed, 0xcc, 0xac, 0xd3, + 0x70, 0xaa, 0xc4, 
0x3f, 0xc0, 0x91, 0x43, 0x0f, 0xfc, 0x07, 0x5c, 0x90, 0xb8, 0x73, 0x40, 0x11, + 0xa7, 0x1e, 0x8b, 0x84, 0x2c, 0x62, 0xce, 0xfc, 0x0f, 0x68, 0x66, 0xc7, 0xde, 0xdd, 0xc4, 0x76, + 0x6d, 0x0e, 0x94, 0x43, 0x6f, 0x9e, 0xf7, 0x79, 0xbf, 0xe7, 0xbd, 0xb7, 0x6f, 0x0c, 0x6e, 0x1c, + 0x6f, 0x33, 0xc3, 0x76, 0x2b, 0xd8, 0xb3, 0x2b, 0xd8, 0xe7, 0x4d, 0x97, 0xda, 0x5f, 0x63, 0x6e, + 0xbb, 0x4e, 0xa5, 0xb3, 0x51, 0x69, 0x10, 0x87, 0x50, 0xcc, 0x49, 0xdd, 0xf0, 0xa8, 0xcb, 0x5d, + 0xb8, 0x1a, 0x30, 0x1b, 0xd8, 0xb3, 0x8d, 0x18, 0xb3, 0xd1, 0xd9, 0x58, 0x79, 0xaf, 0x61, 0xf3, + 0xa6, 0x5f, 0x33, 0x2c, 0xb7, 0x5d, 0x69, 0xb8, 0x0d, 0xb7, 0x22, 0x65, 0x6a, 0xfe, 0x91, 0x3c, + 0xc9, 0x83, 0xfc, 0x15, 0xe8, 0x5a, 0xb9, 0x15, 0x1a, 0x6e, 0x63, 0xab, 0x69, 0x3b, 0x84, 0x9e, + 0x56, 0xbc, 0xe3, 0x86, 0x20, 0xb0, 0x4a, 0x9b, 0x70, 0x3c, 0xc4, 0x83, 0x95, 0xca, 0x28, 0x29, + 0xea, 0x3b, 0xdc, 0x6e, 0x93, 0x4b, 0x02, 0x77, 0x5e, 0x25, 0xc0, 0xac, 0x26, 0x69, 0xe3, 0x8b, + 0x72, 0xe5, 0x2d, 0x00, 0x76, 0x9e, 0x72, 0x8a, 0x0f, 0x71, 0xcb, 0x27, 0xb0, 0x04, 0x32, 0x36, + 0x27, 0x6d, 0xa6, 0x6b, 0x6b, 0xa9, 0xf5, 0x9c, 0x99, 0xeb, 0x75, 0x4b, 0x99, 0x5d, 0x41, 0x40, + 0x01, 0xfd, 0x6e, 0xf6, 0xbb, 0xef, 0x4b, 0x89, 0x67, 0xbf, 0xaf, 0x25, 0xca, 0xbf, 0x6a, 0x60, + 0xf9, 0x23, 0x9b, 0xb4, 0xea, 0xfb, 0xa4, 0x45, 0x2c, 0xee, 0xd2, 0x2a, 0xe7, 0xd4, 0xae, 0xf9, + 0x9c, 0x30, 0x78, 0x1b, 0xe4, 0x29, 0x3e, 0xe9, 0x03, 0xba, 0xb6, 0xa6, 0xad, 0xe7, 0xcc, 0xc5, + 0xb3, 0x6e, 0x29, 0xd1, 0xeb, 0x96, 0xf2, 0x28, 0x84, 0x50, 0x94, 0x0f, 0x3e, 0x05, 0x73, 0x94, + 0x3c, 0xf1, 0x6d, 0x4a, 0xda, 0xc4, 0xe1, 0x4c, 0x4f, 0xae, 0xa5, 0xd6, 0xf3, 0x9b, 0x1f, 0x18, + 0xe1, 0x6d, 0x0c, 0x42, 0x33, 0xbc, 0xe3, 0x86, 0x20, 0x30, 0x43, 0x64, 0xd0, 0xe8, 0x6c, 0x18, + 0x31, 0x5f, 0x50, 0xa8, 0xc6, 0xfc, 0xbf, 0xb2, 0x3b, 0x17, 0x21, 0x32, 0x14, 0xb3, 0x24, 0x83, + 0xd9, 0xc3, 0x35, 0xd2, 0xfa, 0x8f, 0x04, 0x13, 0xf3, 0x65, 0xda, 0x60, 0x7e, 0x4c, 0x02, 0x7d, + 0xcf, 0xb5, 0x70, 0x6b, 0xdf, 0xaf, 0x7d, 0x45, 0x2c, 0x5e, 0xb5, 0x2c, 0xc2, 0x18, 0x22, 0x1d, + 0x9b, 0x9c, 0xc0, 0x2f, 0x41, 0x56, 0x18, 0xa9, 0x63, 0x8e, 0x65, 0x28, 0xf9, 0xcd, 0xf7, 0x27, + 0x73, 0xe9, 0x13, 0xa9, 0xeb, 0x01, 0xe1, 0xd8, 0x84, 0xca, 0x09, 0x10, 0xd2, 0xd0, 0x40, 0x2b, + 0x3c, 0x04, 0x69, 0xe6, 0x11, 0x4b, 0x4f, 0x4a, 0xed, 0xb7, 0x8c, 0x31, 0xbd, 0x64, 0x0c, 0xf1, + 0x70, 0xdf, 0x23, 0x96, 0x39, 0xa7, 0x2c, 0xa4, 0xc5, 0x09, 0x49, 0x7d, 0xf0, 0x0b, 0x30, 0xc3, + 0x38, 0xe6, 0x3e, 0xd3, 0x53, 0x52, 0xf3, 0x9d, 0xa9, 0x35, 0x4b, 0x69, 0xf3, 0x7f, 0x4a, 0xf7, + 0x4c, 0x70, 0x46, 0x4a, 0x6b, 0xf9, 0x33, 0xb0, 0xf4, 0xd0, 0x75, 0x10, 0x61, 0xae, 0x4f, 0x2d, + 0x12, 0x29, 0x80, 0x35, 0x90, 0xf6, 0x30, 0x6f, 0xaa, 0x9b, 0x1f, 0xb8, 0xf6, 0x08, 0xf3, 0x26, + 0x92, 0x88, 0xe0, 0xe8, 0x10, 0x5a, 0x93, 0x21, 0x47, 0x38, 0x0e, 0x09, 0xad, 0x21, 0x89, 0x94, + 0x9f, 0x80, 0xf9, 0x88, 0x72, 0xe4, 0xb7, 0x64, 0xaf, 0x09, 0x28, 0xd6, 0x6b, 0x42, 0x82, 0xa1, + 0x80, 0x0e, 0xef, 0x81, 0x79, 0x27, 0x94, 0x39, 0x40, 0x7b, 0x41, 0x11, 0xe5, 0xcc, 0xc5, 0x5e, + 0xb7, 0x14, 0x55, 0x27, 0x20, 0x74, 0x91, 0xb7, 0xfc, 0x3c, 0x0d, 0xe0, 0x90, 0x68, 0x2a, 0x20, + 0xe7, 0xe0, 0x36, 0x61, 0x1e, 0xb6, 0x88, 0x0a, 0xe9, 0x8a, 0x72, 0x38, 0xf7, 0xb0, 0x0f, 0xa0, + 0x90, 0xe7, 0xd5, 0xc1, 0xc1, 0xb7, 0x41, 0xa6, 0x41, 0x5d, 0xdf, 0x93, 0x17, 0x93, 0x33, 0x0b, + 0x8a, 0x25, 0xf3, 0xb1, 0x20, 0xa2, 0x00, 0x83, 0xd7, 0xc1, 0x6c, 0x87, 0x50, 0x66, 0xbb, 0x8e, + 0x9e, 0x96, 0x6c, 0xf3, 0x8a, 0x6d, 0xf6, 0x30, 0x20, 0xa3, 0x3e, 0x0e, 0x6f, 0x82, 0x2c, 0x55, + 0x8e, 0xeb, 0x19, 0xc9, 0xbb, 0xa0, 0x78, 
0xb3, 0x83, 0x0c, 0x0e, 0x38, 0x44, 0x7f, 0x32, 0xbf, + 0x36, 0x10, 0x98, 0x89, 0xf7, 0xe7, 0x7e, 0x08, 0xa1, 0x28, 0x9f, 0x08, 0x4b, 0xc4, 0xa8, 0xcf, + 0xc6, 0xc3, 0x12, 0x29, 0x40, 0x12, 0x81, 0x6d, 0x50, 0x38, 0x8a, 0x0e, 0x15, 0x3d, 0x3b, 0x41, + 0x45, 0x8f, 0x18, 0x89, 0xe6, 0x95, 0x5e, 0xb7, 0x54, 0x88, 0xcf, 0xa8, 0xb8, 0x76, 0x61, 0xae, + 0x15, 0x6d, 0x7b, 0x3d, 0x37, 0x81, 0xb9, 0x11, 0x43, 0x2b, 0x30, 0x17, 0x9f, 0x22, 0x71, 0xed, + 0xe5, 0x9f, 0x35, 0x30, 0x37, 0x5d, 0x3d, 0xde, 0x00, 0x39, 0xec, 0xd9, 0xf2, 0x52, 0xfb, 0x95, + 0x58, 0x10, 0x55, 0x53, 0x7d, 0xb4, 0x1b, 0x10, 0x51, 0x88, 0x0b, 0xe6, 0x7e, 0xaa, 0x45, 0xc3, + 0x0e, 0x98, 0xfb, 0x26, 0x19, 0x0a, 0x71, 0xb8, 0x05, 0x0a, 0xfd, 0x83, 0x2c, 0x41, 0x3d, 0x2d, + 0x05, 0x64, 0x10, 0x28, 0x0a, 0xa0, 0x38, 0x5f, 0xf9, 0xa7, 0x24, 0x58, 0xde, 0x27, 0xad, 0xa3, + 0xd7, 0x33, 0xe9, 0x1e, 0xc7, 0x26, 0xdd, 0xf6, 0xf8, 0x79, 0x34, 0xdc, 0xcb, 0xd7, 0x36, 0xed, + 0x9e, 0x27, 0xc1, 0xea, 0x18, 0x9f, 0xe0, 0x09, 0x80, 0xf4, 0xd2, 0xf0, 0x50, 0x79, 0xac, 0x8c, + 0xf5, 0xe5, 0xf2, 0xcc, 0x31, 0xaf, 0xf6, 0xba, 0xa5, 0x21, 0xb3, 0x08, 0x0d, 0x31, 0x01, 0xbf, + 0xd1, 0xc0, 0x92, 0x33, 0x6c, 0x0e, 0xab, 0x34, 0x6f, 0x8e, 0x35, 0x3e, 0x74, 0x82, 0x9b, 0xd7, + 0x7a, 0xdd, 0xd2, 0xf0, 0xe1, 0x8e, 0x86, 0xdb, 0x12, 0xdf, 0xd0, 0xab, 0x91, 0xf4, 0x88, 0x06, + 0xf9, 0xf7, 0xea, 0xea, 0xd3, 0x58, 0x5d, 0x6d, 0x4d, 0x5a, 0x57, 0x11, 0x27, 0x47, 0x96, 0xd5, + 0xe7, 0x17, 0xca, 0xea, 0xf6, 0x24, 0x65, 0x15, 0x55, 0x3c, 0xbe, 0xaa, 0x1e, 0x80, 0x95, 0xd1, + 0x0e, 0x4d, 0xfd, 0xe9, 0x29, 0xff, 0x90, 0x04, 0x8b, 0x6f, 0x96, 0x98, 0x69, 0xda, 0xfa, 0x97, + 0x34, 0x58, 0x7e, 0xd3, 0xd2, 0xa3, 0xd6, 0x38, 0x9f, 0x11, 0xaa, 0x96, 0x94, 0xc1, 0xe5, 0x1c, + 0x30, 0x42, 0x91, 0x44, 0x60, 0x19, 0xcc, 0x34, 0x82, 0xaf, 0x5b, 0xf0, 0xfd, 0x01, 0x22, 0xc1, + 0xea, 0xd3, 0xa6, 0x10, 0x58, 0x07, 0x19, 0x22, 0xde, 0x4b, 0x7a, 0x46, 0xee, 0xf3, 0x1f, 0xfe, + 0x93, 0xca, 0x30, 0xe4, 0x8b, 0x6b, 0xc7, 0xe1, 0xf4, 0x34, 0x5c, 0x96, 0x24, 0x0d, 0x05, 0xca, + 0xe1, 0x5b, 0x20, 0xe5, 0xdb, 0x75, 0xb5, 0xcb, 0xe4, 0x15, 0x4b, 0xea, 0x60, 0xf7, 0x3e, 0x12, + 0xf4, 0x15, 0xac, 0x1e, 0x6d, 0x52, 0x05, 0x5c, 0x00, 0xa9, 0x63, 0x72, 0x1a, 0x34, 0x14, 0x12, + 0x3f, 0xe1, 0x3d, 0x90, 0xe9, 0x88, 0xf7, 0x9c, 0xca, 0xef, 0xbb, 0x63, 0x9d, 0x0c, 0x9f, 0x7f, + 0x28, 0x90, 0xba, 0x9b, 0xdc, 0xd6, 0xca, 0xbf, 0x69, 0xe0, 0xda, 0xc8, 0xf2, 0x13, 0xcb, 0x1c, + 0x6e, 0xb5, 0xdc, 0x13, 0x52, 0x97, 0x66, 0xb3, 0xe1, 0x32, 0x57, 0x0d, 0xc8, 0xa8, 0x8f, 0xc3, + 0x77, 0xc0, 0x4c, 0x9d, 0x38, 0x36, 0xa9, 0xcb, 0xb5, 0x2f, 0x1b, 0x56, 0xee, 0x7d, 0x49, 0x45, + 0x0a, 0x15, 0x7c, 0x94, 0x60, 0xe6, 0x3a, 0x6a, 0xd1, 0x1c, 0xf0, 0x21, 0x49, 0x45, 0x0a, 0x85, + 0x55, 0x30, 0x4f, 0x84, 0x9b, 0xd2, 0xff, 0x1d, 0x4a, 0xdd, 0xfe, 0x8d, 0x2e, 0x2b, 0x81, 0xf9, + 0x9d, 0x38, 0x8c, 0x2e, 0xf2, 0x97, 0xff, 0x4a, 0x02, 0x7d, 0xd4, 0x68, 0x83, 0x47, 0xe1, 0x2e, + 0x22, 0x41, 0xb9, 0x0e, 0xe5, 0x37, 0xaf, 0x4f, 0xd4, 0x20, 0x42, 0xc2, 0x5c, 0x52, 0x8e, 0x14, + 0xa2, 0xd4, 0xc8, 0xea, 0x22, 0x8f, 0x90, 0x82, 0x05, 0x27, 0xfe, 0x22, 0xe8, 0xbf, 0x11, 0x6f, + 0x4e, 0xda, 0x0e, 0xd2, 0x9a, 0xae, 0xac, 0x2d, 0x5c, 0x00, 0x18, 0xba, 0xa4, 0x1f, 0x6e, 0x02, + 0x60, 0x3b, 0x96, 0xdb, 0xf6, 0x5a, 0x84, 0x13, 0x99, 0xb6, 0x6c, 0x38, 0x07, 0x77, 0x07, 0x08, + 0x8a, 0x70, 0x0d, 0xcb, 0x77, 0x7a, 0xba, 0x7c, 0x9b, 0xd5, 0xb3, 0xf3, 0x62, 0xe2, 0xc5, 0x79, + 0x31, 0xf1, 0xf2, 0xbc, 0x98, 0x78, 0xd6, 0x2b, 0x6a, 0x67, 0xbd, 0xa2, 0xf6, 0xa2, 0x57, 0xd4, + 0x5e, 0xf6, 0x8a, 0xda, 0x1f, 0xbd, 0xa2, 0xf6, 0xed, 0x9f, 0xc5, 
0xc4, 0xe3, 0xd5, 0x31, 0xff, + 0xd0, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x8c, 0x77, 0x0f, 0xbf, 0x11, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -565,6 +631,90 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FieldSelectorAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldSelectorAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldSelectorAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requirements) > 0 { + for iNdEx := len(m.Requirements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requirements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.RawSelector) + copy(dAtA[i:], m.RawSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RawSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LabelSelectorAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSelectorAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelectorAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requirements) > 0 { + for iNdEx := len(m.Requirements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requirements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.RawSelector) + copy(dAtA[i:], m.RawSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RawSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -712,6 +862,30 @@ func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.FieldSelector != nil { + { + size, err := m.FieldSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -1277,6 +1451,40 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *FieldSelectorAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RawSelector) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requirements) > 0 { + for _, e := range m.Requirements { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
len(m.RawSelector) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requirements) > 0 { + for _, e := range m.Requirements { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *LocalSubjectAccessReview) Size() (n int) { if m == nil { return 0 @@ -1346,6 +1554,14 @@ func (m *ResourceAttributes) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + if m.FieldSelector != nil { + l = m.FieldSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1536,6 +1752,38 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *FieldSelectorAttributes) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequirements := "[]FieldSelectorRequirement{" + for _, f := range this.Requirements { + repeatedStringForRequirements += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRequirements += "}" + s := strings.Join([]string{`&FieldSelectorAttributes{`, + `RawSelector:` + fmt.Sprintf("%v", this.RawSelector) + `,`, + `Requirements:` + repeatedStringForRequirements + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelectorAttributes) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequirements := "[]LabelSelectorRequirement{" + for _, f := range this.Requirements { + repeatedStringForRequirements += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRequirements += "}" + s := strings.Join([]string{`&LabelSelectorAttributes{`, + `RawSelector:` + fmt.Sprintf("%v", this.RawSelector) + `,`, + `Requirements:` + repeatedStringForRequirements + `,`, + `}`, + }, "") + return s +} func (this *LocalSubjectAccessReview) String() string { if this == nil { return "nil" @@ -1582,6 +1830,8 @@ func (this *ResourceAttributes) String() string { `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `FieldSelector:` + strings.Replace(this.FieldSelector.String(), "FieldSelectorAttributes", "FieldSelectorAttributes", 1) + `,`, + `LabelSelector:` + strings.Replace(this.LabelSelector.String(), "LabelSelectorAttributes", "LabelSelectorAttributes", 1) + `,`, `}`, }, "") return s @@ -1807,6 +2057,238 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *FieldSelectorAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldSelectorAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldSelectorAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RawSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requirements = append(m.Requirements, v1.FieldSelectorRequirement{}) + if err := m.Requirements[len(m.Requirements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RawSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requirements = append(m.Requirements, 
v1.LabelSelectorRequirement{}) + if err := m.Requirements[len(m.Requirements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2437,6 +2919,78 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldSelector == nil { + m.FieldSelector = &FieldSelectorAttributes{} + } + if err := m.FieldSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &LabelSelectorAttributes{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto index 83283d0bdb..37b05b8552 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -37,6 +37,60 @@ message ExtraValue { repeated string items = 1; } +// FieldSelectorAttributes indicates a field limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. 
+// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +message FieldSelectorAttributes { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + optional string rawSelector = 1; + + // requirements is the parsed interpretation of a field selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement requirements = 2; +} + +// LabelSelectorAttributes indicates a label limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +message LabelSelectorAttributes { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + optional string rawSelector = 1; + + // requirements is the parsed interpretation of a label selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement requirements = 2; +} + // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions // checking. @@ -44,7 +98,7 @@ message LocalSubjectAccessReview { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. @@ -111,6 +165,20 @@ message ResourceAttributes { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional optional string name = 7; + + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + optional FieldSelectorAttributes fieldSelector = 8; + + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + optional LabelSelectorAttributes labelSelector = 9; } // ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, @@ -145,7 +213,7 @@ message SelfSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. user and groups must be empty optional SelfSubjectAccessReviewSpec spec = 2; @@ -177,7 +245,7 @@ message SelfSubjectRulesReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. optional SelfSubjectRulesReviewSpec spec = 2; @@ -198,7 +266,7 @@ message SubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional SubjectAccessReviewSpec spec = 2; diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go index 3b42956f89..36f5fa4107 100644 --- a/vendor/k8s.io/api/authorization/v1/types.go +++ b/vendor/k8s.io/api/authorization/v1/types.go @@ -26,6 +26,7 @@ import ( // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // SubjectAccessReview checks whether or not a user or group can perform an action. 
type SubjectAccessReview struct { @@ -47,6 +48,7 @@ type SubjectAccessReview struct { // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a // spec.namespace means "in all namespaces". Self is a special case, because users should always be able @@ -69,6 +71,7 @@ type SelfSubjectAccessReview struct { // +genclient // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions @@ -115,6 +118,72 @@ type ResourceAttributes struct { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + FieldSelector *FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"` + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + LabelSelector *LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"` +} + +// LabelSelectorAttributes indicates a label limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +type LabelSelectorAttributes struct { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + RawSelector string `json:"rawSelector,omitempty" protobuf:"bytes,1,opt,name=rawSelector"` + + // requirements is the parsed interpretation of a label selector. + // All requirements must be met for a resource instance to match the selector. 
+ // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + Requirements []metav1.LabelSelectorRequirement `json:"requirements,omitempty" protobuf:"bytes,2,rep,name=requirements"` +} + +// FieldSelectorAttributes indicates a field limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +type FieldSelectorAttributes struct { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + RawSelector string `json:"rawSelector,omitempty" protobuf:"bytes,1,opt,name=rawSelector"` + + // requirements is the parsed interpretation of a field selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + Requirements []metav1.FieldSelectorRequirement `json:"requirements,omitempty" protobuf:"bytes,2,rep,name=requirements"` } // NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface @@ -198,6 +267,7 @@ type SubjectAccessReviewStatus struct { // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. // The returned list of actions may be incomplete depending on the server's authorization mode, diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 93229485cc..dc6b8a89ec 100644 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -27,6 +27,26 @@ package v1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_FieldSelectorAttributes = map[string]string{ + "": "FieldSelectorAttributes indicates a field limited access. 
Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "rawSelector": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "requirements": "requirements is the parsed interpretation of a field selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", +} + +func (FieldSelectorAttributes) SwaggerDoc() map[string]string { + return map_FieldSelectorAttributes +} + +var map_LabelSelectorAttributes = map[string]string{ + "": "LabelSelectorAttributes indicates a label limited access. Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "rawSelector": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "requirements": "requirements is the parsed interpretation of a label selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. 
Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", +} + +func (LabelSelectorAttributes) SwaggerDoc() map[string]string { + return map_LabelSelectorAttributes +} + var map_LocalSubjectAccessReview = map[string]string{ "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -59,14 +79,16 @@ func (NonResourceRule) SwaggerDoc() map[string]string { } var map_ResourceAttributes = map[string]string{ - "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", - "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "group": "Group is the API Group of the Resource. \"*\" means all.", - "version": "Version is the API Version of the Resource. \"*\" means all.", - "resource": "Resource is one of the existing resource types. \"*\" means all.", - "subresource": "Subresource is one of the existing resource types. \"\" means none.", - "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", + "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.\n\nThis field is alpha-level. 
To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", } func (ResourceAttributes) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go index f1d49eb386..7f040f5c56 100644 --- a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -45,6 +46,52 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldSelectorAttributes) DeepCopyInto(out *FieldSelectorAttributes) { + *out = *in + if in.Requirements != nil { + in, out := &in.Requirements, &out.Requirements + *out = make([]metav1.FieldSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorAttributes. +func (in *FieldSelectorAttributes) DeepCopy() *FieldSelectorAttributes { + if in == nil { + return nil + } + out := new(FieldSelectorAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelSelectorAttributes) DeepCopyInto(out *LabelSelectorAttributes) { + *out = *in + if in.Requirements != nil { + in, out := &in.Requirements, &out.Requirements + *out = make([]metav1.LabelSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorAttributes. +func (in *LabelSelectorAttributes) DeepCopy() *LabelSelectorAttributes { + if in == nil { + return nil + } + out := new(LabelSelectorAttributes) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { *out = *in @@ -118,6 +165,16 @@ func (in *NonResourceRule) DeepCopy() *NonResourceRule { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { *out = *in + if in.FieldSelector != nil { + in, out := &in.FieldSelector, &out.FieldSelector + *out = new(FieldSelectorAttributes) + (*in).DeepCopyInto(*out) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(LabelSelectorAttributes) + (*in).DeepCopyInto(*out) + } return } @@ -201,7 +258,7 @@ func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReview if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes @@ -299,7 +356,7 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..b0c0475b48 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LocalSubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectRulesReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *SubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 28642ba638..5007d1b496 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -26,6 +26,7 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/authorization/v1" math "math" math_bits "math/bits" @@ -459,78 +460,82 @@ func init() { } var fileDescriptor_8eab727787743457 = []byte{ - // 1130 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xf7, 0xfa, 0x47, 0x62, 0x3f, 0x37, 0xdf, 0xa4, 0x13, 0xa5, 0xd9, 0xe6, 0x2b, 0x6c, 0xcb, - 0x48, 0x28, 0x88, 0xb2, 0xdb, 0x44, 0x85, 0x94, 0x40, 0x0f, 0xb1, 0x12, 0x50, 0xa4, 0xb6, 0x54, - 0x13, 0x25, 0x07, 0x2a, 0x01, 0xe3, 0xf5, 0xc4, 0x5e, 0x62, 0xef, 0x2e, 0x3b, 0xb3, 0x0e, 0x41, - 0x1c, 0x7a, 0xe4, 0xc8, 0x91, 0x23, 0x27, 0xfe, 0x07, 0x2e, 0x48, 0x70, 0xca, 0xb1, 0xc7, 0x20, - 0x21, 0x8b, 0x2c, 0x7f, 0x04, 0x57, 0x34, 0xb3, 0x63, 0xef, 0x3a, 0xd9, 0xc4, 0x49, 0x0e, 0xf4, - 0xd2, 0x9b, 0xe7, 0x7d, 0x3e, 0xef, 0xcd, 0x7b, 0x6f, 0xde, 0x7b, 0xfb, 0x0c, 0xf7, 0x0f, 0x1e, - 0x32, 0xc3, 0x76, 0x4d, 0xe2, 0xd9, 0x26, 0x09, 0x78, 0xc7, 0xf5, 0xed, 0x6f, 0x09, 0xb7, 0x5d, - 0xc7, 0xec, 0xaf, 0x34, 0x29, 0x27, 0x2b, 0x66, 0x9b, 0x3a, 0xd4, 0x27, 0x9c, 0xb6, 0x0c, 0xcf, - 0x77, 0xb9, 0x8b, 0x6a, 0x91, 0x86, 0x41, 0x3c, 0xdb, 0x18, 0xd3, 0x30, 0x94, 0xc6, 0xd2, 0xbb, - 0x6d, 0x9b, 0x77, 0x82, 0xa6, 0x61, 0xb9, 0x3d, 0xb3, 0xed, 0xb6, 0x5d, 0x53, 0x2a, 0x36, 0x83, - 0x7d, 0x79, 0x92, 0x07, 0xf9, 0x2b, 0x32, 0xb8, 0xf4, 0x20, 0x76, 0xa1, 0x47, 0xac, 0x8e, 0xed, - 0x50, 0xff, 0xc8, 0xf4, 0x0e, 0xda, 0x42, 0xc0, 0xcc, 0x1e, 0xe5, 0xc4, 0xec, 0x9f, 0x73, 0x63, - 0xc9, 0xbc, 0x48, 0xcb, 0x0f, 0x1c, 0x6e, 0xf7, 0xe8, 0x39, 0x85, 0xf7, 0x27, 0x29, 0x30, 0xab, - 0x43, 0x7b, 0xe4, 0xac, 0x5e, 0x7d, 0x0d, 0x60, 0xeb, 0x1b, 0xee, 0x93, 0x3d, 0xd2, 0x0d, 0x28, - 0xaa, 0x42, 0xc1, 0xe6, 0xb4, 0xc7, 0x74, 0xad, 0x96, 0x5b, 0x2e, 0x35, 0x4a, 0xe1, 0xa0, 0x5a, - 0xd8, 0x16, 0x02, 0x1c, 0xc9, 0xd7, 0x8b, 0x3f, 0xfe, 0x54, 0xcd, 0xbc, 0xf8, 0xb3, 0x96, 0xa9, - 0xff, 0x9a, 0x05, 0xfd, 0xb1, 0x6b, 0x91, 0xee, 0x4e, 0xd0, 0xfc, 0x8a, 0x5a, 0x7c, 0xc3, 0xb2, - 0x28, 0x63, 0x98, 0xf6, 0x6d, 0x7a, 0x88, 0xbe, 0x84, 0xa2, 0x88, 0xac, 0x45, 0x38, 0xd1, 0xb5, - 0x9a, 0xb6, 0x5c, 0x5e, 0xbd, 0x6f, 0xc4, 0x89, 0x1d, 0x39, 0x68, 0x78, 0x07, 0x6d, 0x21, 0x60, - 0x86, 0x60, 0x1b, 0xfd, 0x15, 0xe3, 0x53, 0x69, 0xeb, 0x09, 0xe5, 0xa4, 0x81, 0x8e, 0x07, 0xd5, - 0x4c, 0x38, 0xa8, 0x42, 0x2c, 0xc3, 0x23, 0xab, 0xe8, 0x39, 0xe4, 0x99, 0x47, 0x2d, 0x3d, 0x2b, - 0xad, 0x7f, 0x60, 0x4c, 0x7a, 0x36, 0x23, 0xc5, 0xcd, 0x1d, 0x8f, 0x5a, 0x8d, 0x5b, 0xea, 0x9a, - 0xbc, 0x38, 0x61, 0x69, 0x14, 0x59, 0x30, 0xc5, 0x38, 0xe1, 0x01, 0xd3, 0x73, 0xd2, 0xfc, 0x87, - 0x37, 0x33, 0x2f, 0x4d, 0x34, 0xfe, 0xa7, 0x2e, 0x98, 0x8a, 0xce, 0x58, 0x99, 0xae, 0x3f, 0x87, - 0x85, 0xa7, 0xae, 0x83, 0x29, 0x73, 0x03, 0xdf, 0xa2, 0x1b, 0x9c, 0xfb, 0x76, 0x33, 0xe0, 0x94, - 0xa1, 0x1a, 0xe4, 0x3d, 0xc2, 0x3b, 0x32, 0x71, 0xa5, 0xd8, 0xbf, 0x67, 0x84, 0x77, 0xb0, 0x44, - 0x04, 0xa3, 0x4f, 0xfd, 0xa6, 0x0c, 0x3e, 0xc1, 0xd8, 0xa3, 0x7e, 0x13, 0x4b, 0xa4, 0xfe, 0x35, - 0xcc, 0x26, 0x8c, 0xe3, 0xa0, 0x2b, 0xdf, 0x56, 0x40, 0x63, 0x6f, 0x2b, 0x34, 0x18, 
0x8e, 0xe4, - 0xe8, 0x11, 0xcc, 0x3a, 0xb1, 0xce, 0x2e, 0x7e, 0xcc, 0xf4, 0xac, 0xa4, 0xce, 0x87, 0x83, 0x6a, - 0xd2, 0x9c, 0x80, 0xf0, 0x59, 0xae, 0x28, 0x08, 0x94, 0x12, 0x8d, 0x09, 0x25, 0x87, 0xf4, 0x28, - 0xf3, 0x88, 0x45, 0x55, 0x48, 0xb7, 0x95, 0xc3, 0xa5, 0xa7, 0x43, 0x00, 0xc7, 0x9c, 0xc9, 0xc1, - 0xa1, 0x37, 0xa1, 0xd0, 0xf6, 0xdd, 0xc0, 0x93, 0xaf, 0x53, 0x6a, 0xcc, 0x28, 0x4a, 0xe1, 0x13, - 0x21, 0xc4, 0x11, 0x86, 0xde, 0x86, 0xe9, 0x3e, 0xf5, 0x99, 0xed, 0x3a, 0x7a, 0x5e, 0xd2, 0x66, - 0x15, 0x6d, 0x7a, 0x2f, 0x12, 0xe3, 0x21, 0x8e, 0xee, 0x41, 0xd1, 0x57, 0x8e, 0xeb, 0x05, 0xc9, - 0x9d, 0x53, 0xdc, 0xe2, 0x28, 0x83, 0x23, 0x06, 0x7a, 0x0f, 0xca, 0x2c, 0x68, 0x8e, 0x14, 0xa6, - 0xa4, 0xc2, 0xbc, 0x52, 0x28, 0xef, 0xc4, 0x10, 0x4e, 0xf2, 0x44, 0x58, 0x22, 0x46, 0x7d, 0x7a, - 0x3c, 0x2c, 0x91, 0x02, 0x2c, 0x91, 0xfa, 0xef, 0x1a, 0xdc, 0xba, 0xde, 0x8b, 0xbd, 0x03, 0x25, - 0xe2, 0xd9, 0x32, 0xec, 0xe1, 0x5b, 0xcd, 0x88, 0xbc, 0x6e, 0x3c, 0xdb, 0x8e, 0x84, 0x38, 0xc6, - 0x05, 0x79, 0xe8, 0x8c, 0xa8, 0xeb, 0x11, 0x79, 0x78, 0x25, 0xc3, 0x31, 0x8e, 0xd6, 0x60, 0x66, - 0x78, 0x90, 0x8f, 0xa4, 0xe7, 0xa5, 0xc2, 0xed, 0x70, 0x50, 0x9d, 0xc1, 0x49, 0x00, 0x8f, 0xf3, - 0xea, 0xbf, 0x65, 0x61, 0x71, 0x87, 0x76, 0xf7, 0x5f, 0xcd, 0x54, 0xf8, 0x62, 0x6c, 0x2a, 0x3c, - 0xba, 0x42, 0xdb, 0xa6, 0xbb, 0xfa, 0x6a, 0x27, 0xc3, 0xcf, 0x59, 0xf8, 0xff, 0x25, 0x8e, 0xa1, - 0xef, 0x00, 0xf9, 0xe7, 0x1a, 0x4d, 0x65, 0xf4, 0xc1, 0x64, 0x87, 0xce, 0x37, 0x69, 0xe3, 0x4e, - 0x38, 0xa8, 0xa6, 0x34, 0x2f, 0x4e, 0xb9, 0x07, 0x7d, 0xaf, 0xc1, 0x82, 0x93, 0x36, 0xb8, 0x54, - 0xd6, 0xd7, 0x26, 0x7b, 0x90, 0x3a, 0xf7, 0x1a, 0x77, 0xc3, 0x41, 0x35, 0x7d, 0x24, 0xe2, 0xf4, - 0x0b, 0xc5, 0xc8, 0xb9, 0x93, 0x48, 0x94, 0x68, 0x9a, 0xff, 0xae, 0xd6, 0x3e, 0x1f, 0xab, 0xb5, - 0x8f, 0xae, 0x55, 0x6b, 0x09, 0x4f, 0x2f, 0x2c, 0xb5, 0xe6, 0x99, 0x52, 0x5b, 0xbf, 0x72, 0xa9, - 0x25, 0xad, 0x5f, 0x5e, 0x69, 0x4f, 0x60, 0xe9, 0x62, 0xaf, 0xae, 0x3d, 0xba, 0xeb, 0xbf, 0x64, - 0x61, 0xfe, 0xf5, 0x3a, 0x70, 0xb3, 0xa6, 0x3f, 0xc9, 0xc3, 0xe2, 0xeb, 0x86, 0xbf, 0xbc, 0xe1, - 0xc5, 0x47, 0x34, 0x60, 0xd4, 0x57, 0x1f, 0xfe, 0xd1, 0x5b, 0xed, 0x32, 0xea, 0x63, 0x89, 0xa0, - 0xda, 0x70, 0x37, 0x88, 0x3e, 0x58, 0x20, 0x32, 0xad, 0xbe, 0x85, 0x6a, 0x31, 0xb0, 0xa1, 0x40, - 0xc5, 0xc6, 0xab, 0x17, 0x6a, 0xb9, 0xe5, 0xf2, 0xea, 0xe6, 0x8d, 0x6b, 0xc5, 0x90, 0x8b, 0xf3, - 0x96, 0xc3, 0xfd, 0xa3, 0x78, 0x07, 0x91, 0x32, 0x1c, 0xdd, 0x80, 0xde, 0x80, 0x5c, 0x60, 0xb7, - 0xd4, 0x8a, 0x50, 0x56, 0x94, 0xdc, 0xee, 0xf6, 0x26, 0x16, 0xf2, 0xa5, 0x7d, 0xb5, 0x7b, 0x4b, - 0x13, 0x68, 0x0e, 0x72, 0x07, 0xf4, 0x28, 0xea, 0x33, 0x2c, 0x7e, 0xa2, 0x06, 0x14, 0xfa, 0x62, - 0x2d, 0x57, 0x79, 0xbe, 0x37, 0xd9, 0xd3, 0x78, 0x95, 0xc7, 0x91, 0xea, 0x7a, 0xf6, 0xa1, 0x56, - 0xff, 0x43, 0x83, 0xbb, 0x17, 0x16, 0xa4, 0x58, 0x94, 0x48, 0xb7, 0xeb, 0x1e, 0xd2, 0x96, 0xbc, - 0xbb, 0x18, 0x2f, 0x4a, 0x1b, 0x91, 0x18, 0x0f, 0x71, 0xf4, 0x16, 0x4c, 0xb5, 0xa8, 0x63, 0xd3, - 0x96, 0x5c, 0xa9, 0x8a, 0x71, 0x2d, 0x6f, 0x4a, 0x29, 0x56, 0xa8, 0xe0, 0xf9, 0x94, 0x30, 0xd7, - 0x51, 0x4b, 0xdc, 0x88, 0x87, 0xa5, 0x14, 0x2b, 0x14, 0x6d, 0xc0, 0x2c, 0x15, 0x6e, 0xca, 0x20, - 0xb6, 0x7c, 0xdf, 0x1d, 0xbe, 0xec, 0xa2, 0x52, 0x98, 0xdd, 0x1a, 0x87, 0xf1, 0x59, 0x7e, 0xfd, - 0x9f, 0x2c, 0xe8, 0x17, 0x8d, 0x3d, 0x74, 0x10, 0x6f, 0x31, 0x12, 0x94, 0x8b, 0x54, 0x79, 0xd5, - 0xb8, 0x7a, 0xcb, 0x08, 0xb5, 0xc6, 0x82, 0xf2, 0x66, 0x26, 0x29, 0x4d, 0x6c, 0x3e, 0xf2, 0x88, - 0x0e, 0x61, 0xce, 0x19, 0x5f, 0xb9, 0xa3, 0x9d, 0xac, 0xbc, 0xba, 0x72, 0xad, 0x06, 0x91, 0x57, - 0xea, 0xea, 
0xca, 0xb9, 0x33, 0x00, 0xc3, 0xe7, 0x2e, 0x41, 0xab, 0x00, 0xb6, 0x63, 0xb9, 0x3d, - 0xaf, 0x4b, 0x39, 0x95, 0x09, 0x2c, 0xc6, 0xd3, 0x72, 0x7b, 0x84, 0xe0, 0x04, 0x2b, 0x2d, 0xf3, - 0xf9, 0xeb, 0x65, 0xbe, 0xf1, 0xf1, 0xf1, 0x69, 0x25, 0xf3, 0xf2, 0xb4, 0x92, 0x39, 0x39, 0xad, - 0x64, 0x5e, 0x84, 0x15, 0xed, 0x38, 0xac, 0x68, 0x2f, 0xc3, 0x8a, 0x76, 0x12, 0x56, 0xb4, 0xbf, - 0xc2, 0x8a, 0xf6, 0xc3, 0xdf, 0x95, 0xcc, 0x67, 0xb5, 0x49, 0xff, 0xc0, 0xff, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0xcd, 0x08, 0x09, 0x84, 0xa4, 0x0f, 0x00, 0x00, + // 1192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0x8f, 0xf3, 0xa7, 0x4d, 0x26, 0x1b, 0xda, 0x9d, 0xaa, 0x5b, 0x6f, 0x11, 0x49, 0x14, 0x24, + 0x54, 0xb4, 0x8b, 0xb3, 0xad, 0x0a, 0x5d, 0x0a, 0x7b, 0xa8, 0xd5, 0x2e, 0xaa, 0xd4, 0x5d, 0x56, + 0x53, 0xb5, 0x07, 0x56, 0x02, 0x26, 0xce, 0x34, 0x31, 0x75, 0x6c, 0xe3, 0x19, 0xa7, 0x14, 0x71, + 0xd8, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x70, 0xe2, 0x3b, 0x70, 0x41, 0x82, 0x53, 0x8f, 0x7b, 0x2c, + 0x12, 0x8a, 0xa8, 0xf9, 0x10, 0x5c, 0xd1, 0x8c, 0x27, 0xb1, 0x9d, 0xba, 0x4d, 0xdb, 0x03, 0x7b, + 0xd9, 0x5b, 0xe6, 0xfd, 0x7e, 0xef, 0xcf, 0xbc, 0x79, 0xf3, 0xfc, 0x26, 0xe0, 0xc1, 0xe1, 0x43, + 0xaa, 0x99, 0x4e, 0x13, 0xbb, 0x66, 0x13, 0xfb, 0xac, 0xeb, 0x78, 0xe6, 0xb7, 0x98, 0x99, 0x8e, + 0xdd, 0xec, 0x2f, 0xb7, 0x08, 0xc3, 0xcb, 0xcd, 0x0e, 0xb1, 0x89, 0x87, 0x19, 0x69, 0x6b, 0xae, + 0xe7, 0x30, 0x07, 0xd6, 0x43, 0x0d, 0x0d, 0xbb, 0xa6, 0x96, 0xd0, 0xd0, 0xa4, 0xc6, 0xe2, 0x7b, + 0x1d, 0x93, 0x75, 0xfd, 0x96, 0x66, 0x38, 0xbd, 0x66, 0xc7, 0xe9, 0x38, 0x4d, 0xa1, 0xd8, 0xf2, + 0x0f, 0xc4, 0x4a, 0x2c, 0xc4, 0xaf, 0xd0, 0xe0, 0xe2, 0xbd, 0x4b, 0x42, 0x18, 0xf7, 0xbe, 0xb8, + 0x1a, 0x91, 0x7b, 0xd8, 0xe8, 0x9a, 0x36, 0xf1, 0x8e, 0x9b, 0xee, 0x61, 0x87, 0x0b, 0x68, 0xb3, + 0x47, 0x18, 0x4e, 0xd3, 0x6a, 0x5e, 0xa4, 0xe5, 0xf9, 0x36, 0x33, 0x7b, 0xe4, 0x9c, 0xc2, 0x07, + 0x93, 0x14, 0xa8, 0xd1, 0x25, 0x3d, 0x3c, 0xae, 0xd7, 0x58, 0x03, 0x60, 0xeb, 0x1b, 0xe6, 0xe1, + 0x7d, 0x6c, 0xf9, 0x04, 0xd6, 0x40, 0xc1, 0x64, 0xa4, 0x47, 0x55, 0xa5, 0x9e, 0x5b, 0x2a, 0xe9, + 0xa5, 0x60, 0x50, 0x2b, 0x6c, 0x73, 0x01, 0x0a, 0xe5, 0xeb, 0xc5, 0x1f, 0x7f, 0xae, 0x65, 0x5e, + 0xfc, 0x55, 0xcf, 0x34, 0x7e, 0xcb, 0x02, 0x75, 0xc7, 0x31, 0xb0, 0xb5, 0xeb, 0xb7, 0xbe, 0x22, + 0x06, 0xdb, 0x30, 0x0c, 0x42, 0x29, 0x22, 0x7d, 0x93, 0x1c, 0xc1, 0x2f, 0x41, 0x91, 0xef, 0xac, + 0x8d, 0x19, 0x56, 0x95, 0xba, 0xb2, 0x54, 0x5e, 0x79, 0xa0, 0x45, 0xa7, 0x30, 0x0a, 0x50, 0x73, + 0x0f, 0x3b, 0x5c, 0x40, 0x35, 0xce, 0xd6, 0xfa, 0xcb, 0xda, 0xa7, 0xc2, 0xd6, 0x13, 0xc2, 0xb0, + 0x0e, 0x4f, 0x06, 0xb5, 0x4c, 0x30, 0xa8, 0x81, 0x48, 0x86, 0x46, 0x56, 0xe1, 0x73, 0x90, 0xa7, + 0x2e, 0x31, 0xd4, 0xac, 0xb0, 0xfe, 0xa1, 0x36, 0xe9, 0x8c, 0xb5, 0x94, 0x30, 0x77, 0x5d, 0x62, + 0xe8, 0xb7, 0xa4, 0x9b, 0x3c, 0x5f, 0x21, 0x61, 0x14, 0x1a, 0x60, 0x8a, 0x32, 0xcc, 0x7c, 0xaa, + 0xe6, 0x84, 0xf9, 0x8f, 0x6e, 0x66, 0x5e, 0x98, 0xd0, 0xdf, 0x90, 0x0e, 0xa6, 0xc2, 0x35, 0x92, + 0xa6, 0x1b, 0xcf, 0xc1, 0xfc, 0x53, 0xc7, 0x46, 0x84, 0x3a, 0xbe, 0x67, 0x90, 0x0d, 0xc6, 0x3c, + 0xb3, 0xe5, 0x33, 0x42, 0x61, 0x1d, 0xe4, 0x5d, 0xcc, 0xba, 0x22, 0x71, 0xa5, 0x28, 0xbe, 0x67, + 0x98, 0x75, 0x91, 0x40, 0x38, 0xa3, 0x4f, 0xbc, 0x96, 0xd8, 0x7c, 0x8c, 0xb1, 0x4f, 0xbc, 0x16, + 0x12, 0x48, 0xe3, 0x6b, 0x30, 0x13, 0x33, 0x8e, 0x7c, 0x4b, 0x9c, 0x2d, 0x87, 0x12, 0x67, 0xcb, + 0x35, 0x28, 0x0a, 0xe5, 0xf0, 0x11, 0x98, 0xb1, 0x23, 0x9d, 0x3d, 0xb4, 0x43, 0xd5, 0xac, 0xa0, + 0xce, 0x05, 0x83, 0x5a, 
0xdc, 0x1c, 0x87, 0xd0, 0x38, 0xb7, 0xf1, 0x53, 0x1e, 0xc0, 0x94, 0xdd, + 0x34, 0x41, 0xc9, 0xc6, 0x3d, 0x42, 0x5d, 0x6c, 0x10, 0xb9, 0xa5, 0xdb, 0x32, 0xe0, 0xd2, 0xd3, + 0x21, 0x80, 0x22, 0xce, 0xe4, 0xcd, 0xc1, 0xb7, 0x41, 0xa1, 0xe3, 0x39, 0xbe, 0x2b, 0x4e, 0xa7, + 0xa4, 0x57, 0x24, 0xa5, 0xf0, 0x09, 0x17, 0xa2, 0x10, 0x83, 0xef, 0x82, 0xe9, 0x3e, 0xf1, 0xa8, + 0xe9, 0xd8, 0x6a, 0x5e, 0xd0, 0x66, 0x24, 0x6d, 0x7a, 0x3f, 0x14, 0xa3, 0x21, 0x0e, 0xef, 0x83, + 0xa2, 0x27, 0x03, 0x57, 0x0b, 0x82, 0x3b, 0x2b, 0xb9, 0xc5, 0x51, 0x06, 0x47, 0x0c, 0xf8, 0x3e, + 0x28, 0x53, 0xbf, 0x35, 0x52, 0x98, 0x12, 0x0a, 0x73, 0x52, 0xa1, 0xbc, 0x1b, 0x41, 0x28, 0xce, + 0xe3, 0xdb, 0xe2, 0x7b, 0x54, 0xa7, 0x93, 0xdb, 0xe2, 0x29, 0x40, 0x02, 0x81, 0x3d, 0x50, 0x39, + 0x30, 0x89, 0xd5, 0xde, 0x25, 0x16, 0x31, 0x98, 0xe3, 0xa9, 0x45, 0x51, 0x7c, 0xab, 0x97, 0x15, + 0x9f, 0xf6, 0x38, 0xae, 0x11, 0xa5, 0x5d, 0xbf, 0x1d, 0x0c, 0x6a, 0x95, 0x04, 0x88, 0x92, 0xd6, + 0xb9, 0x3b, 0x0b, 0xb7, 0x88, 0x35, 0x72, 0x57, 0xba, 0x82, 0xbb, 0x9d, 0xb8, 0xc6, 0xb8, 0xbb, + 0x04, 0x88, 0x92, 0xd6, 0x1b, 0x7f, 0x28, 0xe0, 0xd6, 0xf5, 0xea, 0xf1, 0x1e, 0x28, 0x61, 0xd7, + 0x14, 0x87, 0x3a, 0xac, 0xc4, 0x0a, 0xaf, 0x9a, 0x8d, 0x67, 0xdb, 0xa1, 0x10, 0x45, 0x38, 0x27, + 0x0f, 0x53, 0xcd, 0x6f, 0xed, 0x88, 0x3c, 0x74, 0x49, 0x51, 0x84, 0xc3, 0x35, 0x50, 0x19, 0x2e, + 0x44, 0x09, 0xaa, 0x79, 0xa1, 0x20, 0x36, 0x81, 0xe2, 0x00, 0x4a, 0xf2, 0x1a, 0xbf, 0x67, 0xc1, + 0xc2, 0x2e, 0xb1, 0x0e, 0x5e, 0x4d, 0xcf, 0xfb, 0x22, 0xd1, 0xf3, 0x1e, 0x5d, 0xa1, 0x29, 0xa5, + 0x87, 0xfa, 0x6a, 0xfb, 0xde, 0x2f, 0x59, 0xf0, 0xe6, 0x25, 0x81, 0xc1, 0xef, 0x00, 0xf4, 0xce, + 0xb5, 0x11, 0x99, 0xd1, 0xd5, 0xc9, 0x01, 0x9d, 0x6f, 0x41, 0xfa, 0x9d, 0x60, 0x50, 0x4b, 0x69, + 0x4d, 0x28, 0xc5, 0x0f, 0xfc, 0x5e, 0x01, 0xf3, 0x76, 0x5a, 0x5b, 0x96, 0x59, 0x5f, 0x9b, 0x1c, + 0x41, 0x6a, 0x57, 0xd7, 0xef, 0x06, 0x83, 0x5a, 0x7a, 0xc3, 0x47, 0xe9, 0x0e, 0xf9, 0x17, 0xf6, + 0x4e, 0x2c, 0x51, 0xfc, 0xd2, 0xfc, 0x7f, 0xb5, 0xf6, 0x79, 0xa2, 0xd6, 0x3e, 0xbe, 0x56, 0xad, + 0xc5, 0x22, 0xbd, 0xb0, 0xd4, 0x5a, 0x63, 0xa5, 0xb6, 0x7e, 0xe5, 0x52, 0x8b, 0x5b, 0xbf, 0xbc, + 0xd2, 0x9e, 0x80, 0xc5, 0x8b, 0xa3, 0xba, 0xf6, 0x87, 0xa9, 0xf1, 0x6b, 0x16, 0xcc, 0xbd, 0x1e, + 0x76, 0x6e, 0x76, 0xe9, 0x4f, 0xf3, 0x60, 0xe1, 0xf5, 0x85, 0xbf, 0xfc, 0xc2, 0xf3, 0x11, 0xc1, + 0xa7, 0xc4, 0x93, 0x63, 0xcd, 0xe8, 0xac, 0xf6, 0x28, 0xf1, 0x90, 0x40, 0x60, 0x7d, 0x38, 0xf9, + 0x84, 0x1f, 0x2c, 0xc0, 0x33, 0x2d, 0xbf, 0x85, 0x72, 0xec, 0x31, 0x41, 0x81, 0xf0, 0x79, 0x5e, + 0x2d, 0xd4, 0x73, 0x4b, 0xe5, 0x95, 0xcd, 0x1b, 0xd7, 0x8a, 0x26, 0x9e, 0x05, 0x5b, 0x36, 0xf3, + 0x8e, 0xa3, 0x09, 0x4b, 0xc8, 0x50, 0xe8, 0x01, 0xbe, 0x05, 0x72, 0xbe, 0xd9, 0x96, 0x03, 0x50, + 0x59, 0x52, 0x72, 0x7b, 0xdb, 0x9b, 0x88, 0xcb, 0x17, 0x0f, 0xe4, 0xcb, 0x42, 0x98, 0x80, 0xb3, + 0x20, 0x77, 0x48, 0x8e, 0xc3, 0x7b, 0x86, 0xf8, 0x4f, 0xa8, 0x83, 0x42, 0x9f, 0x3f, 0x3a, 0x64, + 0x9e, 0xef, 0x4f, 0x8e, 0x34, 0x7a, 0xa8, 0xa0, 0x50, 0x75, 0x3d, 0xfb, 0x50, 0x69, 0xfc, 0xa9, + 0x80, 0xbb, 0x17, 0x16, 0x24, 0x1f, 0x03, 0xb1, 0x65, 0x39, 0x47, 0xa4, 0x2d, 0x7c, 0x17, 0xa3, + 0x31, 0x70, 0x23, 0x14, 0xa3, 0x21, 0x0e, 0xdf, 0x01, 0x53, 0x6d, 0x62, 0x9b, 0xa4, 0x2d, 0x06, + 0xc6, 0x62, 0x54, 0xcb, 0x9b, 0x42, 0x8a, 0x24, 0xca, 0x79, 0x1e, 0xc1, 0xd4, 0xb1, 0xe5, 0x88, + 0x3a, 0xe2, 0x21, 0x21, 0x45, 0x12, 0x85, 0x1b, 0x60, 0x86, 0xf0, 0x30, 0xc5, 0x26, 0xb6, 0x3c, + 0xcf, 0x19, 0x9e, 0xec, 0x82, 0x54, 0x98, 0xd9, 0x4a, 0xc2, 0x68, 0x9c, 0xdf, 0xf8, 0x37, 0x0b, + 0xd4, 0x8b, 0xda, 0x1e, 0x3c, 0x8c, 0xa6, 0x18, 
0x01, 0x8a, 0x41, 0xaa, 0xbc, 0xa2, 0x5d, 0xfd, + 0xca, 0x70, 0x35, 0x7d, 0x5e, 0x46, 0x53, 0x89, 0x4b, 0x63, 0x93, 0x8f, 0x58, 0xc2, 0x23, 0x30, + 0x6b, 0x27, 0x1f, 0x14, 0xe1, 0x4c, 0x56, 0x5e, 0x59, 0xbe, 0xd6, 0x05, 0x11, 0x2e, 0x55, 0xe9, + 0x72, 0x76, 0x0c, 0xa0, 0xe8, 0x9c, 0x13, 0xb8, 0x02, 0x80, 0x69, 0x1b, 0x4e, 0xcf, 0xb5, 0x08, + 0x23, 0x22, 0x81, 0xc5, 0xa8, 0x5b, 0x6e, 0x8f, 0x10, 0x14, 0x63, 0xa5, 0x65, 0x3e, 0x7f, 0xbd, + 0xcc, 0xeb, 0x8f, 0x4f, 0xce, 0xaa, 0x99, 0x97, 0x67, 0xd5, 0xcc, 0xe9, 0x59, 0x35, 0xf3, 0x22, + 0xa8, 0x2a, 0x27, 0x41, 0x55, 0x79, 0x19, 0x54, 0x95, 0xd3, 0xa0, 0xaa, 0xfc, 0x1d, 0x54, 0x95, + 0x1f, 0xfe, 0xa9, 0x66, 0x3e, 0xab, 0x4f, 0xfa, 0x33, 0xe2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x46, 0xf7, 0xe0, 0x3d, 0xaf, 0x10, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -712,6 +717,30 @@ func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.FieldSelector != nil { + { + size, err := m.FieldSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -1346,6 +1375,14 @@ func (m *ResourceAttributes) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + if m.FieldSelector != nil { + l = m.FieldSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1582,6 +1619,8 @@ func (this *ResourceAttributes) String() string { `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `FieldSelector:` + strings.Replace(fmt.Sprintf("%v", this.FieldSelector), "FieldSelectorAttributes", "v11.FieldSelectorAttributes", 1) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelectorAttributes", "v11.LabelSelectorAttributes", 1) + `,`, `}`, }, "") return s @@ -2437,6 +2476,78 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldSelector == nil { + m.FieldSelector = &v11.FieldSelectorAttributes{} + } + if err := m.FieldSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v11.LabelSelectorAttributes{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto index 43bea7aa12..8738768b89 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.authorization.v1beta1; +import "k8s.io/api/authorization/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -44,7 +45,7 @@ message LocalSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. @@ -111,6 +112,14 @@ message ResourceAttributes { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional optional string name = 7; + + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // +optional + optional .k8s.io.api.authorization.v1.FieldSelectorAttributes fieldSelector = 8; + + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // +optional + optional .k8s.io.api.authorization.v1.LabelSelectorAttributes labelSelector = 9; } // ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, @@ -145,7 +154,7 @@ message SelfSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. user and groups must be empty optional SelfSubjectAccessReviewSpec spec = 2; @@ -177,7 +186,7 @@ message SelfSubjectRulesReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. optional SelfSubjectRulesReviewSpec spec = 2; @@ -198,7 +207,7 @@ message SubjectAccessReview { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional SubjectAccessReviewSpec spec = 2; diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go index ef3a501b05..8b8e5a9867 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -19,6 +19,7 @@ package v1beta1 import ( "fmt" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -124,6 +125,12 @@ type ResourceAttributes struct { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // +optional + FieldSelector *authorizationv1.FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"` + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // +optional + LabelSelector *authorizationv1.LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"` } // NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index e0846be7a4..bb1352a2d9 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -59,14 +59,16 @@ func (NonResourceRule) SwaggerDoc() map[string]string { } var map_ResourceAttributes = map[string]string{ - "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", - "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "group": "Group is the API Group of the Resource. \"*\" means all.", - "version": "Version is the API Version of the Resource. \"*\" means all.", - "resource": "Resource is one of the existing resource types. \"*\" means all.", - "subresource": "Subresource is one of the existing resource types. \"\" means none.", - "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.", + "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.", } func (ResourceAttributes) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go index 13f09cf2d2..d76993dba4 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/authorization/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -118,6 +119,16 @@ func (in *NonResourceRule) DeepCopy() *NonResourceRule { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { *out = *in + if in.FieldSelector != nil { + in, out := &in.FieldSelector, &out.FieldSelector + *out = new(v1.FieldSelectorAttributes) + (*in).DeepCopyInto(*out) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelectorAttributes) + (*in).DeepCopyInto(*out) + } return } @@ -201,7 +212,7 @@ func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReview if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes @@ -299,7 +310,7 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go index 8c9c09b5cb..d64c9cbc1a 100644 --- a/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -17,5 +17,6 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 1dbafd1a53..0a961312f4 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -51,7 +51,7 @@ message ContainerResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; // container is the name of the container in the pods of the scaling target. optional string container = 5; @@ -78,7 +78,7 @@ message ContainerResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; // container is the name of the container in the pods of the scaling taget optional string container = 4; @@ -108,17 +108,17 @@ message ExternalMetricSource { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; } // ExternalMetricStatus indicates the current value of a global metric @@ -131,21 +131,21 @@ message ExternalMetricStatus { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // currentValue is the current value of the metric (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // currentAverageValue is the current value of metric averaged over autoscaled pods. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; } // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { // Standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional @@ -168,7 +168,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -184,7 +184,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -222,7 +222,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is the current number of replicas of pods managed by this autoscaler. optional int32 currentReplicas = 3; @@ -336,18 +336,18 @@ message ObjectMetricSource { optional string metricName = 2; // targetValue is the target value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric. // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // ObjectMetricStatus indicates the current value of a metric describing a @@ -360,18 +360,18 @@ message ObjectMetricStatus { optional string metricName = 2; // currentValue is the current value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // PodsMetricSource indicates how to scale on a metric describing each pod in @@ -384,13 +384,13 @@ message PodsMetricSource { // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // PodsMetricStatus indicates the current value of a metric describing each pod in @@ -401,13 +401,13 @@ message PodsMetricStatus { // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // ResourceMetricSource indicates how to scale on a resource metric known to @@ -431,7 +431,7 @@ message ResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; } // ResourceMetricStatus indicates the current value of a resource metric known to @@ -455,14 +455,14 @@ message ResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; } // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 4508290176..b31425b3b7 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -83,6 +83,7 @@ type HorizontalPodAutoscalerStatus struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { @@ -101,6 +102,7 @@ type HorizontalPodAutoscaler struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { @@ -114,6 +116,7 @@ type HorizontalPodAutoscalerList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // Scale represents a scaling request for a resource. type Scale struct { diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..36d86a5ec3 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscalerList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *Scale) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/vendor/k8s.io/api/autoscaling/v2/doc.go b/vendor/k8s.io/api/autoscaling/v2/doc.go index f96a059b6c..aafa2d4de2 100644 --- a/vendor/k8s.io/api/autoscaling/v2/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2/doc.go @@ -17,5 +17,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v2 // import "k8s.io/api/autoscaling/v2" diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto index a9e36975fc..8f2ee58031 100644 --- a/vendor/k8s.io/api/autoscaling/v2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -147,7 +147,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. @@ -190,7 +190,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -206,7 +206,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -258,7 +258,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -293,7 +293,7 @@ message MetricIdentifier { // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; } // MetricSpec specifies how to scale based on a single metric @@ -393,12 +393,12 @@ message MetricTarget { // value is the target value of the metric (as a quantity). 
// +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of @@ -412,12 +412,12 @@ message MetricTarget { message MetricValueStatus { // value is the current value of the metric (as a quantity). // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go index c12a83df1b..69a7b27012 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2/types.go @@ -26,6 +26,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 // HorizontalPodAutoscaler is the configuration for a horizontal pod // autoscaler, which automatically manages the replica count of any resource @@ -573,6 +574,7 @@ type MetricValueStatus struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 // HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { diff --git a/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..99ae748651 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v2 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscalerList) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index e2119d5550..232a598158 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -51,7 +51,7 @@ message ContainerResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; // container is the name of the container in the pods of the scaling target optional string container = 4; @@ -78,7 +78,7 @@ message ContainerResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; // container is the name of the container in the pods of the scaling target optional string container = 4; @@ -108,17 +108,17 @@ message ExternalMetricSource { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; } // ExternalMetricStatus indicates the current value of a global metric @@ -131,14 +131,14 @@ message ExternalMetricStatus { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // currentValue is the current value of the metric (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // currentAverageValue is the current value of metric averaged over autoscaled pods. 
// +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; } // HorizontalPodAutoscaler is the configuration for a horizontal pod @@ -148,7 +148,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. @@ -172,7 +172,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -188,7 +188,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -233,7 +233,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -355,18 +355,18 @@ message ObjectMetricSource { optional string metricName = 2; // targetValue is the target value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // ObjectMetricStatus indicates the current value of a metric describing a @@ -379,18 +379,18 @@ message ObjectMetricStatus { optional string metricName = 2; // currentValue is the current value of the metric (as a quantity). 
- optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // PodsMetricSource indicates how to scale on a metric describing each pod in @@ -403,13 +403,13 @@ message PodsMetricSource { // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // PodsMetricStatus indicates the current value of a metric describing each pod in @@ -420,13 +420,13 @@ message PodsMetricStatus { // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // ResourceMetricSource indicates how to scale on a resource metric known to @@ -450,7 +450,7 @@ message ResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; } // ResourceMetricStatus indicates the current value of a resource metric known to @@ -474,6 +474,6 @@ message ResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. 
- optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 41f7a16ea1..c88fc1fe26 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -147,7 +147,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. @@ -190,7 +190,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -206,7 +206,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -258,7 +258,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -289,7 +289,7 @@ message MetricIdentifier { // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; } // MetricSpec specifies how to scale based on a single metric @@ -389,12 +389,12 @@ message MetricTarget { // value is the target value of the metric (as a quantity). 
// +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of @@ -408,12 +408,12 @@ message MetricTarget { message MetricValueStatus { // value is the current value of the metric (as a quantity). // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go index c4a8db6e78..cb5cbb6002 100644 --- a/vendor/k8s.io/api/batch/v1/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -17,5 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/batch/v1" diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 833b118d00..f5a9385f5e 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -34,7 +34,7 @@ message CronJob { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -52,7 +52,7 @@ message CronJobList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CronJobs. repeated CronJob items = 2; @@ -113,15 +113,15 @@ message CronJobStatus { // A list of pointers to currently running jobs. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.ObjectReference active = 1; + repeated .k8s.io.api.core.v1.ObjectReference active = 1; // Information when was the last time the job was successfully scheduled. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; // Information when was the last time the job successfully completed. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } // Job represents the configuration of a single job. @@ -129,7 +129,7 @@ message Job { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a job. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -152,11 +152,11 @@ message JobCondition { // Last time the condition was checked. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transit from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // (brief) reason for the condition's last transition. // +optional @@ -172,7 +172,7 @@ message JobList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of Jobs. repeated Job items = 2; @@ -213,8 +213,6 @@ message JobSpec { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is beta-level. It can be used when the `JobPodFailurePolicy` - // feature gate is enabled (enabled by default). // +optional optional PodFailurePolicy podFailurePolicy = 11; @@ -224,8 +222,8 @@ message JobSpec { // When the field is specified, it must be immutable and works only for the Indexed Jobs. // Once the Job meets the SuccessPolicy, the lingering pods are terminated. // - // This field is alpha-level. To use this field, you must enable the - // `JobSuccessPolicy` feature gate (disabled by default). + // This field is beta-level. To use this field, you must enable the + // `JobSuccessPolicy` feature gate (enabled by default). // +optional optional SuccessPolicy successPolicy = 16; @@ -262,7 +260,7 @@ message JobSpec { // Normally, the system sets this field for you. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // manualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. @@ -280,7 +278,7 @@ message JobSpec { // Describes the pod that will be created when executing a job. // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - optional k8s.io.api.core.v1.PodTemplateSpec template = 6; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 6; // ttlSecondsAfterFinished limits the lifetime of a Job that has finished // execution (either Complete or Failed). 
If this field is set, @@ -349,7 +347,8 @@ message JobSpec { // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - // all characters before the first "/" must be a valid subdomain as defined // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path - // characters as defined by RFC 3986. The value cannot exceed 64 characters. + // characters as defined by RFC 3986. The value cannot exceed 63 characters. + // This field is immutable. // // This field is alpha-level. The job controller accepts setting the field // when the feature gate JobManagedBy is enabled (disabled by default). @@ -387,7 +386,7 @@ message JobStatus { // The field cannot be modified while the job is unsuspended or finished. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. @@ -396,7 +395,7 @@ message JobStatus { // The value cannot be updated or removed. The value indicates the same or // later point in time as the startTime field. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; // The number of pending and running pods which are not terminating (without // a deletionTimestamp). @@ -466,8 +465,8 @@ message JobStatus { // +optional optional UncountedTerminatedPods uncountedTerminatedPods = 8; - // The number of pods which have a Ready condition. - // +optional + // The number of active pods which have a Ready condition and are not + // terminating (without a deletionTimestamp). optional int32 ready = 9; } @@ -476,7 +475,7 @@ message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 49b0ec6441..b42ec231e4 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -64,6 +64,7 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // Job represents the configuration of a single job. type Job struct { @@ -85,6 +86,7 @@ type Job struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // JobList is a collection of jobs. type JobList struct { @@ -174,7 +176,7 @@ type PodFailurePolicyOnExitCodesRequirement struct { // When specified, it should match one the container or initContainer // names in the pod template. // +optional - ContainerName *string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"` + ContainerName *string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` // Represents the relationship between the container exit code(s) and the // specified values. 
Containers completed with success (exit code 0) are @@ -234,14 +236,14 @@ type PodFailurePolicyRule struct { // Represents the requirement on the container exit codes. // +optional - OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes" protobuf:"bytes,2,opt,name=onExitCodes"` + OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes,omitempty" protobuf:"bytes,2,opt,name=onExitCodes"` // Represents the requirement on the pod conditions. The requirement is represented // as a list of pod condition patterns. The requirement is satisfied if at // least one pattern matches an actual pod condition. At most 20 elements are allowed. // +listType=atomic // +optional - OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions" protobuf:"bytes,3,opt,name=onPodConditions"` + OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions,omitempty" protobuf:"bytes,3,opt,name=onPodConditions"` } // PodFailurePolicy describes how failed pods influence the backoffLimit. @@ -336,8 +338,6 @@ type JobSpec struct { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is beta-level. It can be used when the `JobPodFailurePolicy` - // feature gate is enabled (enabled by default). // +optional PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"` @@ -347,8 +347,8 @@ type JobSpec struct { // When the field is specified, it must be immutable and works only for the Indexed Jobs. // Once the Job meets the SuccessPolicy, the lingering pods are terminated. // - // This field is alpha-level. To use this field, you must enable the - // `JobSuccessPolicy` feature gate (disabled by default). + // This field is beta-level. To use this field, you must enable the + // `JobSuccessPolicy` feature gate (enabled by default). // +optional SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"` @@ -477,7 +477,8 @@ type JobSpec struct { // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - // all characters before the first "/" must be a valid subdomain as defined // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path - // characters as defined by RFC 3986. The value cannot exceed 64 characters. + // characters as defined by RFC 3986. The value cannot exceed 63 characters. + // This field is immutable. // // This field is alpha-level. The job controller accepts setting the field // when the feature gate JobManagedBy is enabled (disabled by default). @@ -594,8 +595,8 @@ type JobStatus struct { // +optional UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"` - // The number of pods which have a Ready condition. - // +optional + // The number of active pods which have a Ready condition and are not + // terminating (without a deletionTimestamp). Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"` } @@ -633,7 +634,6 @@ const ( // JobReasonPodFailurePolicy reason indicates a job failure condition is added due to // a failed pod matching a pod failure policy rule // https://kep.k8s.io/3329 - // This is currently a beta field. JobReasonPodFailurePolicy string = "PodFailurePolicy" // JobReasonBackOffLimitExceeded reason indicates that pods within a job have failed a number of // times higher than backOffLimit times. 
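A minimal usage sketch, not part of the vendored diff: the successPolicy field promoted to beta above can be populated on an Indexed Job roughly as follows. The SuccessPolicyRule type, its SucceededIndexes field, the IndexedCompletion constant, and the k8s.io/utils/ptr helpers are assumptions drawn from the k8s.io/api/batch/v1 v0.31.x surface rather than from this section, and using the field still requires the JobSuccessPolicy feature gate (enabled by default in this release) on the target cluster.

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/utils/ptr"
)

// indexedJobSpecWithSuccessPolicy sketches a JobSpec that is declared
// succeeded as soon as the pod for index 0 finishes successfully; per the
// comments above, any lingering pods are then terminated.
func indexedJobSpecWithSuccessPolicy() batchv1.JobSpec {
	return batchv1.JobSpec{
		CompletionMode: ptr.To(batchv1.IndexedCompletion),
		Completions:    ptr.To[int32](10),
		Parallelism:    ptr.To[int32](10),
		SuccessPolicy: &batchv1.SuccessPolicy{
			Rules: []batchv1.SuccessPolicyRule{
				// Hypothetical rule: only index 0 has to succeed.
				{SucceededIndexes: ptr.To("0")},
			},
		},
		// Template omitted for brevity; a real Job also needs spec.template
		// with restartPolicy Never or OnFailure.
	}
}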
@@ -649,8 +649,13 @@ const ( // JobReasonSuccessPolicy reason indicates a SuccessCriteriaMet condition is added due to // a Job met successPolicy. // https://kep.k8s.io/3998 - // This is currently an alpha field. + // This is currently a beta field. JobReasonSuccessPolicy string = "SuccessPolicy" + // JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to + // a number of succeeded Job pods met completions. + // - https://kep.k8s.io/3998 + // This is currently a beta field. + JobReasonCompletionsReached string = "CompletionsReached" ) // JobCondition describes current state of a job. @@ -688,6 +693,7 @@ type JobTemplateSpec struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // CronJob represents the configuration of a single cron job. type CronJob struct { @@ -709,6 +715,7 @@ type CronJob struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // CronJobList is a collection of cron jobs. type CronJobList struct { diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 84073b8d86..d504887884 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -115,8 +115,8 @@ var map_JobSpec = map[string]string{ "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", - "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", - "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. 
When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is alpha-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (disabled by default).", + "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.", + "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", @@ -127,7 +127,7 @@ var map_JobSpec = map[string]string{ "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. 
If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", - "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 64 characters.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", + "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", } func (JobSpec) SwaggerDoc() map[string]string { @@ -146,7 +146,7 @@ var map_JobStatus = map[string]string{ "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", "failedIndexes": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. 
The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs.", - "ready": "The number of pods which have a Ready condition.", + "ready": "The number of active pods which have a Ready condition and are not terminating (without a deletionTimestamp).", } func (JobStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..b76cb09249 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CronJob) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CronJobList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *Job) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *JobList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index ac774f19ad..6dd322128d 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -35,7 +35,7 @@ message CronJob { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -53,7 +53,7 @@ message CronJobList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CronJobs. repeated CronJob items = 2; @@ -116,15 +116,15 @@ message CronJobStatus { // A list of pointers to currently running jobs. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.ObjectReference active = 1; + repeated .k8s.io.api.core.v1.ObjectReference active = 1; // Information when was the last time the job was successfully scheduled. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; // Information when was the last time the job successfully completed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } // JobTemplateSpec describes the data a Job should have when created from a template @@ -132,11 +132,11 @@ message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional - optional k8s.io.api.batch.v1.JobSpec spec = 2; + optional .k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/certificates/v1/doc.go b/vendor/k8s.io/api/certificates/v1/doc.go index fe3ea3af87..78434478e8 100644 --- a/vendor/k8s.io/api/certificates/v1/doc.go +++ b/vendor/k8s.io/api/certificates/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=certificates.k8s.io package v1 // import "k8s.io/api/certificates/v1" diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto index 968cc2564c..dac7c7f5f2 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1/generated.proto @@ -41,7 +41,7 @@ option go_package = "k8s.io/api/certificates/v1"; // or to obtain certificates from custom non-Kubernetes signers. message CertificateSigningRequest { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the certificate request, and is immutable after creation. // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. @@ -87,19 +87,19 @@ message CertificateSigningRequestCondition { // lastUpdateTime is the time of the last update to this condition // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; // lastTransitionTime is the time the condition last transitioned from one status to another. // If unset, when a new condition type is added or an existing condition's status is changed, // the server defaults this to the current time. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; } // CertificateSigningRequestList is a collection of CertificateSigningRequest objects message CertificateSigningRequestList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a collection of CertificateSigningRequest objects repeated CertificateSigningRequest items = 2; diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go index 92b2018e76..ba8009840d 100644 --- a/vendor/k8s.io/api/certificates/v1/types.go +++ b/vendor/k8s.io/api/certificates/v1/types.go @@ -27,6 +27,7 @@ import ( // +genclient:nonNamespaced // +genclient:method=UpdateApproval,verb=update,subresource=approval,input=k8s.io/api/certificates/v1.CertificateSigningRequest,result=k8s.io/api/certificates/v1.CertificateSigningRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // CertificateSigningRequest objects provide a mechanism to obtain x509 certificates // by submitting a certificate signing request, and having it asynchronously approved and issued. 
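An illustrative sketch, not part of the vendored diff: the +k8s:prerelease-lifecycle-gen:introduced tags added in this bump cause generated APILifecycleIntroduced methods (see the new zz_generated.prerelease-lifecycle.go files later in the diff), which a caller can query as shown here; only the example package and function names are invented.

package example

import (
	"fmt"

	certificatesv1 "k8s.io/api/certificates/v1"
)

// printIntroducedRelease reports the release in which the
// certificates.k8s.io/v1 CertificateSigningRequest type was introduced,
// using the method generated from the tag added above.
func printIntroducedRelease() {
	var csr certificatesv1.CertificateSigningRequest
	// Returns 1, 19 per the generated file added below.
	major, minor := csr.APILifecycleIntroduced()
	fmt.Printf("CertificateSigningRequest introduced in %d.%d\n", major, minor)
}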
@@ -262,6 +263,7 @@ type CertificateSigningRequestCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // CertificateSigningRequestList is a collection of CertificateSigningRequest objects type CertificateSigningRequestList struct { diff --git a/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..3a2b274030 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CertificateSigningRequest) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CertificateSigningRequestList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto index b0ebc4bd45..7155f778cf 100644 --- a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto @@ -46,7 +46,7 @@ option go_package = "k8s.io/api/certificates/v1alpha1"; message ClusterTrustBundle { // metadata contains the object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the signer (if any) and trust anchors. optional ClusterTrustBundleSpec spec = 2; @@ -57,7 +57,7 @@ message ClusterTrustBundleList { // metadata contains the list metadata. 
// // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a collection of ClusterTrustBundle objects repeated ClusterTrustBundle items = 2; diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index f70f01ef7a..f3ec4c06e4 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/certificates/v1beta1"; // Describes a certificate signing request message CertificateSigningRequest { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the certificate request, and is immutable after creation. // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. @@ -65,18 +65,18 @@ message CertificateSigningRequestCondition { // timestamp for the last update to this condition // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; // lastTransitionTime is the time the condition last transitioned from one status to another. // If unset, when a new condition type is added or an existing condition's status is changed, // the server defaults this to the current time. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; } message CertificateSigningRequestList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated CertificateSigningRequest items = 2; } diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/k8s.io/api/coordination/v1/doc.go index fc2f4f2c6e..9b2fbbda3a 100644 --- a/vendor/k8s.io/api/coordination/v1/doc.go +++ b/vendor/k8s.io/api/coordination/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=coordination.k8s.io diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go index 8b7ab98caa..cf6702aef3 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -139,40 +139,44 @@ func init() { } var fileDescriptor_239d5a4df3139dce = []byte{ - // 524 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x6f, 0xd3, 0x30, - 0x18, 0xc6, 0x9b, 0xb5, 0x95, 0x56, 0x97, 0x8d, 0x2a, 0xea, 0x21, 0xea, 0x21, 0x19, 0x95, 0x90, - 0x26, 0x24, 0x1c, 0x3a, 0x21, 0x84, 0x38, 0x8d, 0x08, 0x01, 0x93, 0x3a, 0x21, 0x65, 0x3b, 0xa1, - 0x1d, 0x70, 0x93, 0x97, 0xd4, 0x74, 0x89, 0x83, 0xed, 0x16, 0xed, 0xc6, 0x47, 0xe0, 0xca, 0xc7, - 0x80, 0x4f, 0xd1, 0xe3, 0x8e, 0x3b, 0x45, 0xd4, 0x7c, 0x11, 0x64, 0xb7, 0x5b, 0x4b, 0xff, 0x68, - 0xd3, 0x6e, 0xf1, 0xeb, 0xe7, 0xf9, 0xbd, 0x8f, 0x9f, 0x43, 0xd0, 0x93, 0xc1, 0x4b, 0x81, 0x29, - 0xf3, 0x49, 0x4e, 0xfd, 0x88, 0x31, 0x1e, 0xd3, 0x8c, 0x48, 0xca, 0x32, 0x7f, 0xd4, 0xf1, 0x13, - 0xc8, 0x80, 0x13, 0x09, 0x31, 0xce, 0x39, 0x93, 0xcc, 0x6e, 0x4d, 0xb5, 0x98, 0xe4, 0x14, 0x2f, - 0x6a, 0xf1, 0xa8, 0xd3, 0x7a, 0x9a, 0x50, 0xd9, 0x1f, 0xf6, 0x70, 0xc4, 0x52, 0x3f, 0x61, 0x09, - 0xf3, 0x8d, 0xa5, 0x37, 0xfc, 0x6c, 0x4e, 0xe6, 0x60, 0xbe, 0xa6, 0xa8, 0xd6, 0xf3, 0xf9, 0xda, - 0x94, 0x44, 0x7d, 0x9a, 0x01, 0xbf, 0xf0, 0xf3, 0x41, 0xa2, 0x07, 0xc2, 0x4f, 0x41, 0x92, 0x35, - 0x01, 0x5a, 0xfe, 0x26, 0x17, 0x1f, 0x66, 0x92, 0xa6, 0xb0, 0x62, 0x78, 0x71, 0x9b, 0x41, 0x44, - 0x7d, 0x48, 0xc9, 0xb2, 0xaf, 0xfd, 0xdb, 0x42, 0xd5, 0x2e, 0x10, 0x01, 0xf6, 0x27, 0xb4, 0xad, - 0xd3, 0xc4, 0x44, 0x12, 0xc7, 0xda, 0xb3, 0xf6, 0xeb, 0x07, 0xcf, 0xf0, 0xbc, 0x86, 0x1b, 0x28, - 0xce, 0x07, 0x89, 0x1e, 0x08, 0xac, 0xd5, 0x78, 0xd4, 0xc1, 0x1f, 0x7a, 0x5f, 0x20, 0x92, 0xc7, - 0x20, 0x49, 0x60, 0x8f, 0x0b, 0xaf, 0xa4, 0x0a, 0x0f, 0xcd, 0x67, 0xe1, 0x0d, 0xd5, 0x7e, 0x87, - 0x2a, 0x22, 0x87, 0xc8, 0xd9, 0x32, 0xf4, 0xc7, 0x78, 0x73, 0xc9, 0xd8, 0x44, 0x3a, 0xc9, 0x21, - 0x0a, 0x1e, 0xcc, 0x90, 0x15, 0x7d, 0x0a, 0x0d, 0xa0, 0xfd, 0xcb, 0x42, 0x35, 0xa3, 0xe8, 0x52, - 0x21, 0xed, 0xb3, 0x95, 0xe0, 0xf8, 0x6e, 0xc1, 0xb5, 0xdb, 0xc4, 0x6e, 0xcc, 0x76, 0x6c, 0x5f, - 0x4f, 0x16, 0x42, 0xbf, 0x45, 0x55, 0x2a, 0x21, 0x15, 0xce, 0xd6, 0x5e, 0x79, 0xbf, 0x7e, 0xf0, - 0xe8, 0xd6, 0xd4, 0xc1, 0xce, 0x8c, 0x56, 0x3d, 0xd2, 0xbe, 0x70, 0x6a, 0x6f, 0xff, 0x2c, 0xcf, - 0x32, 0xeb, 0x77, 0xd8, 0xaf, 0xd0, 0x6e, 0x9f, 0x9d, 0xc7, 0xc0, 0x8f, 0x62, 0xc8, 0x24, 0x95, - 0x17, 0x26, 0x79, 0x2d, 0xb0, 0x55, 0xe1, 0xed, 0xbe, 0xff, 0xef, 0x26, 0x5c, 0x52, 0xda, 0x5d, - 0xd4, 0x3c, 0xd7, 0xa0, 0x37, 0x43, 0x6e, 0x36, 0x9f, 0x40, 0xc4, 0xb2, 0x58, 0x98, 0x5a, 0xab, - 0x81, 0xa3, 0x0a, 0xaf, 0xd9, 0x5d, 0x73, 0x1f, 0xae, 0x75, 0xd9, 0x3d, 0x54, 0x27, 0xd1, 0xd7, - 0x21, 0xe5, 0x70, 0x4a, 0x53, 0x70, 0xca, 0xa6, 0x40, 0xff, 0x6e, 0x05, 0x1e, 0xd3, 0x88, 0x33, - 0x6d, 0x0b, 0x1e, 0xaa, 0xc2, 0xab, 0xbf, 0x9e, 0x73, 0xc2, 0x45, 0xa8, 0x7d, 0x86, 0x6a, 0x1c, - 0x32, 0xf8, 0x66, 0x36, 0x54, 0xee, 0xb7, 0x61, 0x47, 0x15, 0x5e, 0x2d, 0xbc, 0xa6, 0x84, 0x73, - 0xa0, 0x7d, 0x88, 0x1a, 0xe6, 0x65, 0xa7, 0x9c, 0x64, 0x82, 0xea, 0xb7, 0x09, 0xa7, 0x6a, 0xba, - 0x68, 0xaa, 0xc2, 0x6b, 0x74, 0x97, 0xee, 0xc2, 0x15, 0x75, 0x70, 0x38, 0x9e, 0xb8, 0xa5, 0xcb, - 0x89, 0x5b, 0xba, 0x9a, 0xb8, 0xa5, 0xef, 0xca, 0xb5, 0xc6, 
0xca, 0xb5, 0x2e, 0x95, 0x6b, 0x5d, - 0x29, 0xd7, 0xfa, 0xa3, 0x5c, 0xeb, 0xc7, 0x5f, 0xb7, 0xf4, 0xb1, 0xb5, 0xf9, 0x07, 0xf2, 0x2f, - 0x00, 0x00, 0xff, 0xff, 0xb0, 0xb0, 0x3a, 0x46, 0x5d, 0x04, 0x00, 0x00, + // 588 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x40, + 0x14, 0xc6, 0xb7, 0xb0, 0xab, 0xec, 0xac, 0xfc, 0xc9, 0xc8, 0x45, 0xb3, 0x17, 0x2d, 0x92, 0x98, + 0x10, 0x13, 0xa7, 0x42, 0x8c, 0x31, 0x26, 0x26, 0x58, 0x89, 0x4a, 0xb2, 0x44, 0x53, 0xb8, 0x32, + 0x5c, 0x38, 0xdb, 0x1e, 0xba, 0x23, 0xb4, 0x53, 0x67, 0x66, 0x31, 0xdc, 0xf9, 0x08, 0x3e, 0x81, + 0xef, 0xa0, 0x4f, 0xc1, 0x25, 0x97, 0x5c, 0x35, 0x32, 0xbe, 0x85, 0x57, 0x66, 0x66, 0x0b, 0x0b, + 0xcb, 0x6e, 0x20, 0xde, 0x75, 0xce, 0x39, 0xdf, 0xef, 0x7c, 0x73, 0x4e, 0x5b, 0xf4, 0x68, 0xff, + 0xb9, 0x24, 0x8c, 0x07, 0xb4, 0x60, 0x41, 0xcc, 0xb9, 0x48, 0x58, 0x4e, 0x15, 0xe3, 0x79, 0x70, + 0xb8, 0x1a, 0xa4, 0x90, 0x83, 0xa0, 0x0a, 0x12, 0x52, 0x08, 0xae, 0x38, 0x6e, 0x0f, 0x6a, 0x09, + 0x2d, 0x18, 0xb9, 0x5c, 0x4b, 0x0e, 0x57, 0xdb, 0x8f, 0x53, 0xa6, 0x7a, 0xfd, 0x2e, 0x89, 0x79, + 0x16, 0xa4, 0x3c, 0xe5, 0x81, 0x95, 0x74, 0xfb, 0x7b, 0xf6, 0x64, 0x0f, 0xf6, 0x69, 0x80, 0x6a, + 0x3f, 0x1d, 0xb6, 0xcd, 0x68, 0xdc, 0x63, 0x39, 0x88, 0xa3, 0xa0, 0xd8, 0x4f, 0x4d, 0x40, 0x06, + 0x19, 0x28, 0x3a, 0xc6, 0x40, 0x3b, 0x98, 0xa4, 0x12, 0xfd, 0x5c, 0xb1, 0x0c, 0xae, 0x09, 0x9e, + 0xdd, 0x24, 0x90, 0x71, 0x0f, 0x32, 0x3a, 0xaa, 0x5b, 0xfe, 0xe5, 0xa0, 0x46, 0x07, 0xa8, 0x04, + 0xfc, 0x09, 0xcd, 0x18, 0x37, 0x09, 0x55, 0xd4, 0x75, 0x96, 0x9c, 0x95, 0xd6, 0xda, 0x13, 0x32, + 0x1c, 0xc3, 0x05, 0x94, 0x14, 0xfb, 0xa9, 0x09, 0x48, 0x62, 0xaa, 0xc9, 0xe1, 0x2a, 0x79, 0xdf, + 0xfd, 0x0c, 0xb1, 0xda, 0x02, 0x45, 0x43, 0x7c, 0x5c, 0xfa, 0x35, 0x5d, 0xfa, 0x68, 0x18, 0x8b, + 0x2e, 0xa8, 0xf8, 0x2d, 0xaa, 0xcb, 0x02, 0x62, 0x77, 0xca, 0xd2, 0x1f, 0x92, 0xc9, 0x43, 0x26, + 0xd6, 0xd2, 0x76, 0x01, 0x71, 0x78, 0xaf, 0x42, 0xd6, 0xcd, 0x29, 0xb2, 0x80, 0xe5, 0x9f, 0x0e, + 0x6a, 0xda, 0x8a, 0x0e, 0x93, 0x0a, 0xef, 0x5e, 0x33, 0x4e, 0x6e, 0x67, 0xdc, 0xa8, 0xad, 0xed, + 0x85, 0xaa, 0xc7, 0xcc, 0x79, 0xe4, 0x92, 0xe9, 0x37, 0xa8, 0xc1, 0x14, 0x64, 0xd2, 0x9d, 0x5a, + 0x9a, 0x5e, 0x69, 0xad, 0x3d, 0xb8, 0xd1, 0x75, 0x38, 0x5b, 0xd1, 0x1a, 0x9b, 0x46, 0x17, 0x0d, + 0xe4, 0xcb, 0x3f, 0xea, 0x95, 0x67, 0x73, 0x0f, 0xfc, 0x02, 0xcd, 0xf5, 0xf8, 0x41, 0x02, 0x62, + 0x33, 0x81, 0x5c, 0x31, 0x75, 0x64, 0x9d, 0x37, 0x43, 0xac, 0x4b, 0x7f, 0xee, 0xdd, 0x95, 0x4c, + 0x34, 0x52, 0x89, 0x3b, 0x68, 0xf1, 0xc0, 0x80, 0x36, 0xfa, 0xc2, 0x76, 0xde, 0x86, 0x98, 0xe7, + 0x89, 0xb4, 0x63, 0x6d, 0x84, 0xae, 0x2e, 0xfd, 0xc5, 0xce, 0x98, 0x7c, 0x34, 0x56, 0x85, 0xbb, + 0xa8, 0x45, 0xe3, 0x2f, 0x7d, 0x26, 0x60, 0x87, 0x65, 0xe0, 0x4e, 0xdb, 0x01, 0x06, 0xb7, 0x1b, + 0xe0, 0x16, 0x8b, 0x05, 0x37, 0xb2, 0x70, 0x5e, 0x97, 0x7e, 0xeb, 0xd5, 0x90, 0x13, 0x5d, 0x86, + 0xe2, 0x5d, 0xd4, 0x14, 0x90, 0xc3, 0x57, 0xdb, 0xa1, 0xfe, 0x7f, 0x1d, 0x66, 0x75, 0xe9, 0x37, + 0xa3, 0x73, 0x4a, 0x34, 0x04, 0xe2, 0x75, 0xb4, 0x60, 0x6f, 0xb6, 0x23, 0x68, 0x2e, 0x99, 0xb9, + 0x9b, 0x74, 0x1b, 0x76, 0x16, 0x8b, 0xba, 0xf4, 0x17, 0x3a, 0x23, 0xb9, 0xe8, 0x5a, 0x35, 0xde, + 0x40, 0x33, 0x52, 0x99, 0xaf, 0x22, 0x3d, 0x72, 0xef, 0xd8, 0x3d, 0xac, 0x98, 0xb7, 0x61, 0xbb, + 0x8a, 0xfd, 0x2d, 0x7d, 0xf7, 0xf5, 0xf9, 0xaa, 0x21, 0x19, 0x6c, 0xb1, 0xca, 0x45, 0x17, 0x4a, + 0xfc, 0x12, 0xcd, 0x17, 0x02, 0xf6, 0x40, 0x08, 0x48, 0x06, 0x2b, 0x74, 0xef, 0x5a, 0xd8, 0x7d, + 0x5d, 0xfa, 0xf3, 0x1f, 0xae, 0xa6, 0xa2, 0xd1, 0xda, 0x70, 
0xfd, 0xf8, 0xcc, 0xab, 0x9d, 0x9c, + 0x79, 0xb5, 0xd3, 0x33, 0xaf, 0xf6, 0x4d, 0x7b, 0xce, 0xb1, 0xf6, 0x9c, 0x13, 0xed, 0x39, 0xa7, + 0xda, 0x73, 0x7e, 0x6b, 0xcf, 0xf9, 0xfe, 0xc7, 0xab, 0x7d, 0x6c, 0x4f, 0xfe, 0x8b, 0xfd, 0x0b, + 0x00, 0x00, 0xff, 0xff, 0xf8, 0xf4, 0xd4, 0x78, 0xe2, 0x04, 0x00, 0x00, } func (m *Lease) Marshal() (dAtA []byte, err error) { @@ -285,6 +289,20 @@ func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreferredHolder != nil { + i -= len(*m.PreferredHolder) + copy(dAtA[i:], *m.PreferredHolder) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreferredHolder))) + i-- + dAtA[i] = 0x3a + } + if m.Strategy != nil { + i -= len(*m.Strategy) + copy(dAtA[i:], *m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Strategy))) + i-- + dAtA[i] = 0x32 + } if m.LeaseTransitions != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) i-- @@ -394,6 +412,14 @@ func (m *LeaseSpec) Size() (n int) { if m.LeaseTransitions != nil { n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) } + if m.Strategy != nil { + l = len(*m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreferredHolder != nil { + l = len(*m.PreferredHolder) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -440,6 +466,8 @@ func (this *LeaseSpec) String() string { `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + `Strategy:` + valueToStringGenerated(this.Strategy) + `,`, + `PreferredHolder:` + valueToStringGenerated(this.PreferredHolder) + `,`, `}`, }, "") return s @@ -859,6 +887,72 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } } m.LeaseTransitions = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]) + m.Strategy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredHolder", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PreferredHolder = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index 36fce60f2d..4d4f7e08f4 100644 
--- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/coordination/v1"; message Lease { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -45,7 +45,7 @@ message LeaseList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Lease items = 2; @@ -54,27 +54,43 @@ message LeaseList { // LeaseSpec is a specification of a Lease. message LeaseSpec { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional optional string holderIdentity = 1; // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last + // to wait to force acquire it. This is measured against the time of last // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; // acquireTime is a time when the current lease was acquired. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; // renewTime is a time when the current holder of a lease has last // updated the lease. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; // leaseTransitions is the number of transitions of a lease between // holders. // +optional optional int32 leaseTransitions = 5; + + // Strategy indicates the strategy for picking the leader for coordinated leader election. + // If the field is not specified, there is no active coordination for this lease. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string strategy = 6; + + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // This field can only be set if Strategy is also set. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string preferredHolder = 7; } diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go index b0e1d06829..5307cea88f 100644 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -20,8 +20,21 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +type CoordinatedLeaseStrategy string + +// CoordinatedLeaseStrategy defines the strategy for picking the leader for coordinated leader election. 
+const ( + // OldestEmulationVersion picks the oldest LeaseCandidate, where "oldest" is defined as follows + // 1) Select the candidate(s) with the lowest emulation version + // 2) If multiple candidates have the same emulation version, select the candidate(s) with the lowest binary version. (Note that binary version must be greater or equal to emulation version) + // 3) If multiple candidates have the same binary version, select the candidate with the oldest creationTimestamp. + // If a candidate does not specify the emulationVersion and binaryVersion fields, it will not be considered a candidate for the lease. + OldestEmulationVersion CoordinatedLeaseStrategy = "OldestEmulationVersion" +) + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // Lease defines a lease concept. type Lease struct { @@ -39,10 +52,12 @@ type Lease struct { // LeaseSpec is a specification of a Lease. type LeaseSpec struct { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last + // to wait to force acquire it. This is measured against the time of last // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` @@ -57,9 +72,22 @@ type LeaseSpec struct { // holders. // +optional LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` + // Strategy indicates the strategy for picking the leader for coordinated leader election. + // If the field is not specified, there is no active coordination for this lease. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + Strategy *CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // This field can only be set if Strategy is also set. + // +featureGate=CoordinatedLeaderElection + // +optional + PreferredHolder *string `json:"preferredHolder,omitempty" protobuf:"bytes,7,opt,name=preferredHolder"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // LeaseList is a list of Lease objects. type LeaseList struct { diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index f3720eca02..6c1a7ea8b9 100644 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -49,11 +49,13 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", - "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed renewTime.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against the time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", + "strategy": "Strategy indicates the strategy for picking the leader for coordinated leader election. If the field is not specified, there is no active coordination for this lease. (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "preferredHolder": "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up. This field can only be set if Strategy is also set.", } func (LeaseSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go index 99f6b0be7a..4d549cc99f 100644 --- a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go @@ -111,6 +111,16 @@ func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { *out = new(int32) **out = **in } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(CoordinatedLeaseStrategy) + **out = **in + } + if in.PreferredHolder != nil { + in, out := &in.PreferredHolder, &out.PreferredHolder + *out = new(string) + **out = **in + } return } diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..a22632cba9 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Lease) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *LeaseList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go b/vendor/k8s.io/api/coordination/v1alpha1/doc.go similarity index 66% rename from vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go rename to vendor/k8s.io/api/coordination/v1alpha1/doc.go index a01d603fe5..33a0b0ea97 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go +++ b/vendor/k8s.io/api/coordination/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,9 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -/* -Package ratelimiter defines rate limiters used by Controllers to limit how frequently requests may be queued. +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true -Typical rate limiters that can be used are implemented in client-go's workqueue package. -*/ -package ratelimiter +// +groupName=coordination.k8s.io + +package v1alpha1 // import "k8s.io/api/coordination/v1alpha1" diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go b/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..9e072e62d0 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go @@ -0,0 +1,1036 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/coordination/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} } +func (*LeaseCandidate) ProtoMessage() {} +func (*LeaseCandidate) Descriptor() ([]byte, []int) { + return fileDescriptor_cb9e87df9da593c2, []int{0} +} +func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidate) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidate.Merge(m, src) +} +func (m *LeaseCandidate) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidate) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidate.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo + +func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} } +func (*LeaseCandidateList) ProtoMessage() {} +func (*LeaseCandidateList) Descriptor() ([]byte, []int) { + return fileDescriptor_cb9e87df9da593c2, []int{1} +} +func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidateList.Merge(m, src) +} +func (m *LeaseCandidateList) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidateList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidateList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo + +func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} } +func (*LeaseCandidateSpec) ProtoMessage() {} +func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cb9e87df9da593c2, []int{2} +} +func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidateSpec.Merge(m, src) +} +func (m *LeaseCandidateSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidate") + proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateList") + proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/coordination/v1alpha1/generated.proto", fileDescriptor_cb9e87df9da593c2) +} + +var fileDescriptor_cb9e87df9da593c2 = []byte{ + // 570 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcb, 0x6e, 0xd3, 0x4c, + 0x14, 0xc7, 0xe3, 0x36, 0xf9, 0x94, 0xcc, 0xd7, 0xa0, 0x30, 0x15, 0x92, 0x95, 0x85, 0x13, 0x65, + 0x55, 0x21, 0x31, 0x6e, 0xa0, 0x42, 0x48, 0xec, 0x5c, 0x40, 0x42, 0x4a, 0x4b, 0xe5, 0x42, 0x25, + 0x50, 0x17, 
0x4c, 0xec, 0x53, 0x67, 0x48, 0x7c, 0xd1, 0x78, 0x52, 0x94, 0x5d, 0x17, 0x3c, 0x00, + 0x8f, 0x15, 0x58, 0x75, 0xd9, 0x55, 0x44, 0xcc, 0x5b, 0xb0, 0x42, 0x33, 0xb1, 0x73, 0x6d, 0x94, + 0x88, 0x5d, 0xce, 0xe5, 0xf7, 0x3f, 0xe7, 0x7f, 0xac, 0x0c, 0x3a, 0xec, 0xbe, 0x88, 0x09, 0x0b, + 0x4d, 0x1a, 0x31, 0xd3, 0x09, 0x43, 0xee, 0xb2, 0x80, 0x0a, 0x16, 0x06, 0xe6, 0x75, 0x93, 0xf6, + 0xa2, 0x0e, 0x6d, 0x9a, 0x1e, 0x04, 0xc0, 0xa9, 0x00, 0x97, 0x44, 0x3c, 0x14, 0x21, 0xae, 0x4f, + 0x08, 0x42, 0x23, 0x46, 0xe6, 0x09, 0x92, 0x11, 0xd5, 0x27, 0x1e, 0x13, 0x9d, 0x7e, 0x9b, 0x38, + 0xa1, 0x6f, 0x7a, 0xa1, 0x17, 0x9a, 0x0a, 0x6c, 0xf7, 0xaf, 0x54, 0xa4, 0x02, 0xf5, 0x6b, 0x22, + 0x58, 0x7d, 0xbc, 0x7e, 0x85, 0xe5, 0xe1, 0xd5, 0xa3, 0x59, 0xaf, 0x4f, 0x9d, 0x0e, 0x0b, 0x80, + 0x0f, 0xcc, 0xa8, 0xeb, 0xc9, 0x44, 0x6c, 0xfa, 0x20, 0xe8, 0x7d, 0x94, 0xb9, 0x8e, 0xe2, 0xfd, + 0x40, 0x30, 0x1f, 0x56, 0x80, 0xe7, 0x9b, 0x80, 0xd8, 0xe9, 0x80, 0x4f, 0x97, 0xb9, 0xc6, 0x4f, + 0x0d, 0x3d, 0x68, 0x01, 0x8d, 0xe1, 0x98, 0x06, 0x2e, 0x73, 0xa9, 0x00, 0xfc, 0x19, 0x15, 0xe5, + 0x5a, 0x2e, 0x15, 0x54, 0xd7, 0xea, 0xda, 0xc1, 0xff, 0x4f, 0x0f, 0xc9, 0xec, 0x82, 0x53, 0x75, + 0x12, 0x75, 0x3d, 0x99, 0x88, 0x89, 0xec, 0x26, 0xd7, 0x4d, 0xf2, 0xae, 0xfd, 0x05, 0x1c, 0x71, + 0x02, 0x82, 0x5a, 0x78, 0x38, 0xaa, 0xe5, 0x92, 0x51, 0x0d, 0xcd, 0x72, 0xf6, 0x54, 0x15, 0x5f, + 0xa0, 0x7c, 0x1c, 0x81, 0xa3, 0xef, 0x28, 0xf5, 0x23, 0xb2, 0xe9, 0xfb, 0x90, 0xc5, 0x0d, 0xcf, + 0x23, 0x70, 0xac, 0xbd, 0x74, 0x42, 0x5e, 0x46, 0xb6, 0xd2, 0x6b, 0xfc, 0xd0, 0x10, 0x5e, 0x6c, + 0x6d, 0xb1, 0x58, 0xe0, 0xcb, 0x15, 0x43, 0x64, 0x3b, 0x43, 0x92, 0x56, 0x76, 0x2a, 0xe9, 0xb0, + 0x62, 0x96, 0x99, 0x33, 0xf3, 0x01, 0x15, 0x98, 0x00, 0x3f, 0xd6, 0x77, 0xea, 0xbb, 0x4b, 0xb7, + 0xda, 0xca, 0x8d, 0x55, 0x4e, 0xc5, 0x0b, 0x6f, 0xa5, 0x8c, 0x3d, 0x51, 0x6b, 0x7c, 0xcb, 0x2f, + 0x7b, 0x91, 0x46, 0xb1, 0x89, 0x4a, 0x3d, 0x99, 0x3d, 0xa5, 0x3e, 0x28, 0x33, 0x25, 0xeb, 0x61, + 0xca, 0x97, 0x5a, 0x59, 0xc1, 0x9e, 0xf5, 0xe0, 0x8f, 0xa8, 0x18, 0xb1, 0xc0, 0x7b, 0xcf, 0x7c, + 0x48, 0xef, 0x6d, 0x6e, 0x67, 0xfe, 0x84, 0x39, 0x3c, 0x94, 0x98, 0xb5, 0x27, 0x9d, 0x9f, 0xa5, + 0x22, 0xf6, 0x54, 0x0e, 0x5f, 0xa2, 0x12, 0x87, 0x00, 0xbe, 0x2a, 0xed, 0xdd, 0x7f, 0xd3, 0x2e, + 0xcb, 0xc5, 0xed, 0x4c, 0xc5, 0x9e, 0x09, 0xe2, 0x97, 0xa8, 0xdc, 0x66, 0x01, 0xe5, 0x83, 0x0b, + 0xe0, 0x31, 0x0b, 0x03, 0x3d, 0xaf, 0xdc, 0x3e, 0x4a, 0xdd, 0x96, 0xad, 0xf9, 0xa2, 0xbd, 0xd8, + 0x8b, 0x5f, 0xa1, 0x0a, 0xf8, 0xfd, 0x9e, 0x3a, 0x7c, 0xc6, 0x17, 0x14, 0xaf, 0xa7, 0x7c, 0xe5, + 0xf5, 0x52, 0xdd, 0x5e, 0x21, 0xf0, 0x8d, 0x86, 0xf6, 0x23, 0x0e, 0x57, 0xc0, 0x39, 0xb8, 0xe7, + 0x42, 0xfe, 0x6f, 0x3c, 0x06, 0xb1, 0xfe, 0x5f, 0x7d, 0xf7, 0xa0, 0x64, 0x9d, 0x26, 0xa3, 0xda, + 0xfe, 0xd9, 0x6a, 0xf9, 0xcf, 0xa8, 0xf6, 0x6c, 0xfd, 0x03, 0x41, 0x8e, 0xb3, 0x18, 0x5c, 0xf5, + 0xc1, 0x52, 0x70, 0x60, 0xdf, 0x37, 0xca, 0x7a, 0x33, 0x1c, 0x1b, 0xb9, 0xdb, 0xb1, 0x91, 0xbb, + 0x1b, 0x1b, 0xb9, 0x9b, 0xc4, 0xd0, 0x86, 0x89, 0xa1, 0xdd, 0x26, 0x86, 0x76, 0x97, 0x18, 0xda, + 0xaf, 0xc4, 0xd0, 0xbe, 0xff, 0x36, 0x72, 0x9f, 0xea, 0x9b, 0xde, 0xc4, 0xbf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x05, 0x28, 0x49, 0xd9, 0x36, 0x05, 0x00, 0x00, +} + +func (m *LeaseCandidate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func 
(m *LeaseCandidate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseCandidateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseCandidateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PreferredStrategies) > 0 { + for iNdEx := len(m.PreferredStrategies) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PreferredStrategies[iNdEx]) + copy(dAtA[i:], m.PreferredStrategies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PreferredStrategies[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.EmulationVersion) + copy(dAtA[i:], m.EmulationVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmulationVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.BinaryVersion) + copy(dAtA[i:], m.BinaryVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BinaryVersion))) + i-- + dAtA[i] = 0x22 + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.PingTime != nil { + { + size, err := m.PingTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.LeaseName) + copy(dAtA[i:], m.LeaseName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LeaseName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LeaseCandidate) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LeaseCandidateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LeaseCandidateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LeaseName) + n += 1 + l + sovGenerated(uint64(l)) + if m.PingTime != nil { + l = m.PingTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RenewTime != nil { + l = m.RenewTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.BinaryVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EmulationVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.PreferredStrategies) > 0 { + for _, s := range m.PreferredStrategies { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LeaseCandidate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseCandidate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseCandidateSpec", "LeaseCandidateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaseCandidateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]LeaseCandidate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LeaseCandidate", "LeaseCandidate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LeaseCandidateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LeaseCandidateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseCandidateSpec{`, + `LeaseName:` + fmt.Sprintf("%v", this.LeaseName) + `,`, + `PingTime:` + strings.Replace(fmt.Sprintf("%v", this.PingTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `BinaryVersion:` + fmt.Sprintf("%v", this.BinaryVersion) + `,`, + `EmulationVersion:` + fmt.Sprintf("%v", this.EmulationVersion) + `,`, + `PreferredStrategies:` + fmt.Sprintf("%v", this.PreferredStrategies) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LeaseCandidate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCandidateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, LeaseCandidate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LeaseName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PingTime == nil { + m.PingTime = &v1.MicroTime{} + } + if err := m.PingTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RenewTime == nil { + m.RenewTime = &v1.MicroTime{} + } + if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinaryVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinaryVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmulationVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EmulationVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredStrategies", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredStrategies = append(m.PreferredStrategies, k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.proto b/vendor/k8s.io/api/coordination/v1alpha1/generated.proto new file mode 100644 index 0000000000..57895ad569 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/generated.proto @@ -0,0 +1,105 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.coordination.v1alpha1; + +import "k8s.io/api/coordination/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/coordination/v1alpha1"; + +// LeaseCandidate defines a candidate for a Lease object. +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. +message LeaseCandidate { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional LeaseCandidateSpec spec = 2; +} + +// LeaseCandidateList is a list of Lease objects. +message LeaseCandidateList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of schema objects. + repeated LeaseCandidate items = 2; +} + +// LeaseCandidateSpec is a specification of a Lease. +message LeaseCandidateSpec { + // LeaseName is the name of the lease for which this candidate is contending. + // This field is immutable. 
+ // +required + optional string leaseName = 1; + + // PingTime is the last time that the server has requested the LeaseCandidate + // to renew. It is only done during leader election to check if any + // LeaseCandidates have become ineligible. When PingTime is updated, the + // LeaseCandidate will respond by updating RenewTime. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime pingTime = 2; + + // RenewTime is the time that the LeaseCandidate was last updated. + // Any time a Lease needs to do leader election, the PingTime field + // is updated to signal to the LeaseCandidate that they should update + // the RenewTime. + // Old LeaseCandidate objects are also garbage collected if it has been hours + // since the last renew. The PingTime field is updated regularly to prevent + // garbage collection for still active LeaseCandidates. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 3; + + // BinaryVersion is the binary version. It must be in a semver format without leading `v`. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + optional string binaryVersion = 4; + + // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. + // EmulationVersion must be less than or equal to BinaryVersion. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + optional string emulationVersion = 5; + + // PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. + // The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated + // leader election to make a decision about the final election strategy. This follows as + // - If all clients have strategy X as the first element in this list, strategy X will be used. + // - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y + // will be used. + // - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader + // election will not operate the Lease until resolved. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +listType=atomic + // +required + repeated string preferredStrategies = 6; +} + diff --git a/vendor/k8s.io/api/coordination/v1alpha1/register.go b/vendor/k8s.io/api/coordination/v1alpha1/register.go new file mode 100644 index 0000000000..6e57905a19 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "coordination.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &LeaseCandidate{}, + &LeaseCandidateList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types.go b/vendor/k8s.io/api/coordination/v1alpha1/types.go new file mode 100644 index 0000000000..14066600cf --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/types.go @@ -0,0 +1,100 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/coordination/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// LeaseCandidate defines a candidate for a Lease object. +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. +type LeaseCandidate struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec LeaseCandidateSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LeaseCandidateSpec is a specification of a Lease. +type LeaseCandidateSpec struct { + // LeaseName is the name of the lease for which this candidate is contending. + // This field is immutable. + // +required + LeaseName string `json:"leaseName" protobuf:"bytes,1,name=leaseName"` + // PingTime is the last time that the server has requested the LeaseCandidate + // to renew. It is only done during leader election to check if any + // LeaseCandidates have become ineligible. 
When PingTime is updated, the + // LeaseCandidate will respond by updating RenewTime. + // +optional + PingTime *metav1.MicroTime `json:"pingTime,omitempty" protobuf:"bytes,2,opt,name=pingTime"` + // RenewTime is the time that the LeaseCandidate was last updated. + // Any time a Lease needs to do leader election, the PingTime field + // is updated to signal to the LeaseCandidate that they should update + // the RenewTime. + // Old LeaseCandidate objects are also garbage collected if it has been hours + // since the last renew. The PingTime field is updated regularly to prevent + // garbage collection for still active LeaseCandidates. + // +optional + RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,3,opt,name=renewTime"` + // BinaryVersion is the binary version. It must be in a semver format without leading `v`. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + BinaryVersion string `json:"binaryVersion,omitempty" protobuf:"bytes,4,opt,name=binaryVersion"` + // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. + // EmulationVersion must be less than or equal to BinaryVersion. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + EmulationVersion string `json:"emulationVersion,omitempty" protobuf:"bytes,5,opt,name=emulationVersion"` + // PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. + // The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated + // leader election to make a decision about the final election strategy. This follows as + // - If all clients have strategy X as the first element in this list, strategy X will be used. + // - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y + // will be used. + // - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader + // election will not operate the Lease until resolved. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +listType=atomic + // +required + PreferredStrategies []v1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty" protobuf:"bytes,6,opt,name=preferredStrategies"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// LeaseCandidateList is a list of Lease objects. +type LeaseCandidateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of schema objects. + Items []LeaseCandidate `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..0e52809c8c --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_LeaseCandidate = map[string]string{ + "": "LeaseCandidate defines a candidate for a Lease object. Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (LeaseCandidate) SwaggerDoc() map[string]string { + return map_LeaseCandidate +} + +var map_LeaseCandidateList = map[string]string{ + "": "LeaseCandidateList is a list of Lease objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of schema objects.", +} + +func (LeaseCandidateList) SwaggerDoc() map[string]string { + return map_LeaseCandidateList +} + +var map_LeaseCandidateSpec = map[string]string{ + "": "LeaseCandidateSpec is a specification of a Lease.", + "leaseName": "LeaseName is the name of the lease for which this candidate is contending. This field is immutable.", + "pingTime": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.", + "renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.", + "binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required when strategy is \"OldestEmulationVersion\"", + "emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"", + "preferredStrategies": "PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. 
The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated leader election to make a decision about the final election strategy. This follows as - If all clients have strategy X as the first element in this list, strategy X will be used. - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y\n will be used.\n- If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader\n election will not operate the Lease until resolved.\n(Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", +} + +func (LeaseCandidateSpec) SwaggerDoc() map[string]string { + return map_LeaseCandidateSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..9cf15d21dc --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,116 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/coordination/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseCandidate) DeepCopyInto(out *LeaseCandidate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidate. +func (in *LeaseCandidate) DeepCopy() *LeaseCandidate { + if in == nil { + return nil + } + out := new(LeaseCandidate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseCandidate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseCandidateList) DeepCopyInto(out *LeaseCandidateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LeaseCandidate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateList. 
+func (in *LeaseCandidateList) DeepCopy() *LeaseCandidateList { + if in == nil { + return nil + } + out := new(LeaseCandidateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseCandidateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) { + *out = *in + if in.PingTime != nil { + in, out := &in.PingTime, &out.PingTime + *out = (*in).DeepCopy() + } + if in.RenewTime != nil { + in, out := &in.RenewTime, &out.RenewTime + *out = (*in).DeepCopy() + } + if in.PreferredStrategies != nil { + in, out := &in.PreferredStrategies, &out.PreferredStrategies + *out = make([]v1.CoordinatedLeaseStrategy, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateSpec. +func (in *LeaseCandidateSpec) DeepCopy() *LeaseCandidateSpec { + if in == nil { + return nil + } + out := new(LeaseCandidateSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..f42bef65c9 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseCandidate) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *LeaseCandidate) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
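The LeaseCandidateSpec comments above describe the candidate side of coordinated leader election: a candidate advertises its lease name, binary and emulation versions, and an ordered list of preferred strategies, and it answers a server ping (spec.pingTime) by refreshing spec.renewTime so it is not treated as ineligible or garbage collected. A minimal sketch under those assumptions follows; it is illustrative only, with hypothetical object names and versions and no error handling.

// Sketch: a coordination/v1alpha1 LeaseCandidate contending for the
// "example-controller" lease, and the candidate's response to a ping.
package example

import (
	"time"

	coordinationv1 "k8s.io/api/coordination/v1"
	coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func newCandidate(name string) *coordinationv1alpha1.LeaseCandidate {
	return &coordinationv1alpha1.LeaseCandidate{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "kube-system"},
		Spec: coordinationv1alpha1.LeaseCandidateSpec{
			LeaseName:        "example-controller",
			BinaryVersion:    "1.31.1", // semver without leading "v", must be >= EmulationVersion
			EmulationVersion: "1.31.0",
			// Ordered preference; evaluated across all candidates to settle on
			// the final election strategy.
			PreferredStrategies: []coordinationv1.CoordinatedLeaseStrategy{
				coordinationv1.OldestEmulationVersion,
			},
		},
	}
}

// respondToPing mirrors the documented protocol: when the server sets
// spec.pingTime, the candidate bumps spec.renewTime to stay eligible.
func respondToPing(c *coordinationv1alpha1.LeaseCandidate) {
	if c.Spec.PingTime != nil {
		now := metav1.NewMicroTime(time.Now())
		c.Spec.RenewTime = &now
	}
}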
+func (in *LeaseCandidate) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseCandidateList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *LeaseCandidateList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *LeaseCandidateList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index 655de56590..bea9b8146a 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -25,6 +25,8 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + + k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -139,40 +141,45 @@ func init() { } var fileDescriptor_8d4e223b8bb23da3 = []byte{ - // 527 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x41, 0x6f, 0xd3, 0x30, - 0x14, 0xc7, 0x9b, 0xb5, 0x95, 0x56, 0x97, 0x8d, 0x2a, 0xea, 0x21, 0xea, 0x21, 0x99, 0x7a, 0x40, - 0x13, 0x12, 0x36, 0x9d, 0x10, 0x42, 0x9c, 0x20, 0x02, 0x89, 0x89, 0x4c, 0x48, 0xd9, 0x4e, 0x68, - 0x07, 0xdc, 0xe4, 0x91, 0x9a, 0x2e, 0x71, 0x88, 0xdd, 0xa2, 0xdd, 0xf8, 0x08, 0x5c, 0xf9, 0x22, - 0xf0, 0x15, 0x7a, 0xdc, 0x71, 0xa7, 0x88, 0x9a, 0x2f, 0x82, 0xec, 0x76, 0x6b, 0x69, 0x87, 0x5a, - 0x71, 0x8b, 0x9f, 0xdf, 0xef, 0xf7, 0xfe, 0x7e, 0x87, 0x20, 0x32, 0x7c, 0x26, 0x30, 0xe3, 0x84, - 0xe6, 0x8c, 0x44, 0x9c, 0x17, 0x31, 0xcb, 0xa8, 0x64, 0x3c, 0x23, 0xe3, 0x5e, 0x1f, 0x24, 0xed, - 0x91, 0x04, 0x32, 0x28, 0xa8, 0x84, 0x18, 0xe7, 0x05, 0x97, 0xdc, 0xf6, 0x66, 0x00, 0xa6, 0x39, - 0xc3, 0xcb, 0x00, 0x9e, 0x03, 0x9d, 0x47, 0x09, 0x93, 0x83, 0x51, 0x1f, 0x47, 0x3c, 0x25, 0x09, - 0x4f, 0x38, 0x31, 0x5c, 0x7f, 0xf4, 0xd1, 0x9c, 0xcc, 0xc1, 0x7c, 0xcd, 0x7c, 0x9d, 0x27, 0x8b, - 0x00, 0x29, 0x8d, 0x06, 0x2c, 0x83, 0xe2, 0x92, 0xe4, 0xc3, 0x44, 0x17, 0x04, 0x49, 0x41, 0x52, - 0x32, 0x5e, 0x4b, 0xd1, 0x21, 0xff, 0xa2, 0x8a, 0x51, 0x26, 0x59, 0x0a, 0x6b, 0xc0, 0xd3, 0x4d, - 0x80, 0x88, 0x06, 0x90, 0xd2, 0x55, 0xae, 0xfb, 0xd3, 0x42, 0xf5, 0x00, 0xa8, 0x00, 0xfb, 0x03, - 0xda, 0xd5, 0x69, 0x62, 0x2a, 0xa9, 0x63, 0x1d, 0x58, 0x87, 0xcd, 0xa3, 0xc7, 0x78, 0xb1, 0x8b, - 0x5b, 0x29, 0xce, 0x87, 0x89, 0x2e, 0x08, 0xac, 0xbb, 0xf1, 0xb8, 0x87, 0xdf, 0xf5, 0x3f, 0x41, - 0x24, 0x4f, 0x40, 0x52, 0xdf, 0x9e, 0x94, 0x5e, 0x45, 0x95, 0x1e, 0x5a, 0xd4, 0xc2, 0x5b, 0xab, - 0x1d, 0xa0, 0x9a, 0xc8, 0x21, 0x72, 0x76, 0x8c, 0xfd, 0x21, 0xde, 0xb0, 0x69, 0x6c, 0x72, 0x9d, - 0xe6, 0x10, 0xf9, 
0xf7, 0xe6, 0xde, 0x9a, 0x3e, 0x85, 0xc6, 0xd2, 0xfd, 0x61, 0xa1, 0x86, 0xe9, - 0x08, 0x98, 0x90, 0xf6, 0xf9, 0x5a, 0x7a, 0xbc, 0x5d, 0x7a, 0x4d, 0x9b, 0xec, 0xad, 0xf9, 0x8c, - 0xdd, 0x9b, 0xca, 0x52, 0xf2, 0xb7, 0xa8, 0xce, 0x24, 0xa4, 0xc2, 0xd9, 0x39, 0xa8, 0x1e, 0x36, - 0x8f, 0x1e, 0x6c, 0x17, 0xdd, 0xdf, 0x9b, 0x2b, 0xeb, 0xc7, 0x1a, 0x0e, 0x67, 0x8e, 0xee, 0xf7, - 0xea, 0x3c, 0xb8, 0x7e, 0x8c, 0xfd, 0x1c, 0xed, 0x0f, 0xf8, 0x45, 0x0c, 0xc5, 0x71, 0x0c, 0x99, - 0x64, 0xf2, 0xd2, 0xc4, 0x6f, 0xf8, 0xb6, 0x2a, 0xbd, 0xfd, 0x37, 0x7f, 0xdd, 0x84, 0x2b, 0x9d, - 0x76, 0x80, 0xda, 0x17, 0x5a, 0xf4, 0x6a, 0x54, 0x98, 0xf1, 0xa7, 0x10, 0xf1, 0x2c, 0x16, 0x66, - 0xc1, 0x75, 0xdf, 0x51, 0xa5, 0xd7, 0x0e, 0xee, 0xb8, 0x0f, 0xef, 0xa4, 0xec, 0x3e, 0x6a, 0xd2, - 0xe8, 0xf3, 0x88, 0x15, 0x70, 0xc6, 0x52, 0x70, 0xaa, 0x66, 0x8b, 0x64, 0xbb, 0x2d, 0x9e, 0xb0, - 0xa8, 0xe0, 0x1a, 0xf3, 0xef, 0xab, 0xd2, 0x6b, 0xbe, 0x5c, 0x78, 0xc2, 0x65, 0xa9, 0x7d, 0x8e, - 0x1a, 0x05, 0x64, 0xf0, 0xc5, 0x4c, 0xa8, 0xfd, 0xdf, 0x84, 0x3d, 0x55, 0x7a, 0x8d, 0xf0, 0xc6, - 0x12, 0x2e, 0x84, 0xf6, 0x0b, 0xd4, 0x32, 0x2f, 0x3b, 0x2b, 0x68, 0x26, 0x98, 0x7e, 0x9b, 0x70, - 0xea, 0x66, 0x17, 0x6d, 0x55, 0x7a, 0xad, 0x60, 0xe5, 0x2e, 0x5c, 0xeb, 0xf6, 0x5f, 0x4f, 0xa6, - 0x6e, 0xe5, 0x6a, 0xea, 0x56, 0xae, 0xa7, 0x6e, 0xe5, 0xab, 0x72, 0xad, 0x89, 0x72, 0xad, 0x2b, - 0xe5, 0x5a, 0xd7, 0xca, 0xb5, 0x7e, 0x29, 0xd7, 0xfa, 0xf6, 0xdb, 0xad, 0xbc, 0xf7, 0x36, 0xfc, - 0x54, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x81, 0x42, 0xfe, 0x76, 0x04, 0x00, 0x00, + // 600 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x4e, + 0x14, 0xc7, 0xb7, 0xb0, 0xfb, 0xfb, 0xb1, 0xb3, 0xf2, 0x27, 0x23, 0x17, 0x0d, 0x17, 0x2d, 0xe1, + 0xc2, 0x10, 0x12, 0xa7, 0x82, 0xc6, 0x18, 0x13, 0x13, 0x2d, 0x9a, 0x48, 0x2c, 0xd1, 0x14, 0xae, + 0x0c, 0x89, 0xce, 0xb6, 0x87, 0xee, 0x08, 0xed, 0xd4, 0x99, 0x59, 0x0c, 0x77, 0x3e, 0x82, 0x4f, + 0xa3, 0xf1, 0x0d, 0xb8, 0xe4, 0x92, 0xab, 0x46, 0xc6, 0xb7, 0xf0, 0xca, 0xcc, 0x6c, 0x61, 0x61, + 0x81, 0xb0, 0xf1, 0x6e, 0xe7, 0x9c, 0xf3, 0xfd, 0x9c, 0xef, 0x9c, 0xb3, 0x53, 0x14, 0xec, 0x3d, + 0x91, 0x84, 0xf1, 0x80, 0x96, 0x2c, 0x48, 0x38, 0x17, 0x29, 0x2b, 0xa8, 0x62, 0xbc, 0x08, 0x0e, + 0x56, 0xbb, 0xa0, 0xe8, 0x6a, 0x90, 0x41, 0x01, 0x82, 0x2a, 0x48, 0x49, 0x29, 0xb8, 0xe2, 0xd8, + 0x1f, 0x08, 0x08, 0x2d, 0x19, 0xb9, 0x28, 0x20, 0xb5, 0x60, 0xe1, 0x7e, 0xc6, 0x54, 0xaf, 0xdf, + 0x25, 0x09, 0xcf, 0x83, 0x8c, 0x67, 0x3c, 0xb0, 0xba, 0x6e, 0x7f, 0xd7, 0x9e, 0xec, 0xc1, 0xfe, + 0x1a, 0xf0, 0x16, 0x56, 0x6e, 0x36, 0x30, 0xda, 0x7b, 0xe1, 0xd1, 0xb0, 0x36, 0xa7, 0x49, 0x8f, + 0x15, 0x20, 0x0e, 0x83, 0x72, 0x2f, 0x33, 0x01, 0x19, 0xe4, 0xa0, 0xe8, 0x75, 0xaa, 0xe0, 0x26, + 0x95, 0xe8, 0x17, 0x8a, 0xe5, 0x70, 0x45, 0xf0, 0xf8, 0x36, 0x81, 0x4c, 0x7a, 0x90, 0xd3, 0x51, + 0xdd, 0xd2, 0x0f, 0x07, 0xb5, 0x22, 0xa0, 0x12, 0xf0, 0x47, 0x34, 0x65, 0xdc, 0xa4, 0x54, 0x51, + 0xd7, 0x59, 0x74, 0x96, 0x3b, 0x6b, 0x0f, 0xc8, 0x70, 0x6e, 0xe7, 0x50, 0x52, 0xee, 0x65, 0x26, + 0x20, 0x89, 0xa9, 0x26, 0x07, 0xab, 0xe4, 0x6d, 0xf7, 0x13, 0x24, 0x6a, 0x13, 0x14, 0x0d, 0xf1, + 0x51, 0xe5, 0x37, 0x74, 0xe5, 0xa3, 0x61, 0x2c, 0x3e, 0xa7, 0xe2, 0x08, 0x35, 0x65, 0x09, 0x89, + 0x3b, 0x61, 0xe9, 0x2b, 0xe4, 0x96, 0xad, 0x10, 0xeb, 0x6b, 0xab, 0x84, 0x24, 0xbc, 0x53, 0x73, + 0x9b, 0xe6, 0x14, 0x5b, 0xca, 0xd2, 0x77, 0x07, 0xb5, 0x6d, 0x45, 0xc4, 0xa4, 0xc2, 0x3b, 0x57, + 0xdc, 0x93, 0xf1, 0xdc, 0x1b, 0xb5, 0xf5, 0x3e, 0x57, 0xf7, 0x98, 0x3a, 0x8b, 0x5c, 0x70, 0xfe, + 
0x06, 0xb5, 0x98, 0x82, 0x5c, 0xba, 0x13, 0x8b, 0x93, 0xcb, 0x9d, 0xb5, 0x7b, 0xe3, 0x59, 0x0f, + 0xa7, 0x6b, 0x64, 0x6b, 0xc3, 0x88, 0xe3, 0x01, 0x63, 0xe9, 0x67, 0xb3, 0x36, 0x6e, 0x2e, 0x83, + 0x9f, 0xa2, 0x99, 0x1e, 0xdf, 0x4f, 0x41, 0x6c, 0xa4, 0x50, 0x28, 0xa6, 0x0e, 0xad, 0xfd, 0x76, + 0x88, 0x75, 0xe5, 0xcf, 0xbc, 0xbe, 0x94, 0x89, 0x47, 0x2a, 0x71, 0x84, 0xe6, 0xf7, 0x0d, 0xe8, + 0x65, 0x5f, 0xd8, 0xf6, 0x5b, 0x90, 0xf0, 0x22, 0x95, 0x76, 0xc0, 0xad, 0xd0, 0xd5, 0x95, 0x3f, + 0x1f, 0x5d, 0x93, 0x8f, 0xaf, 0x55, 0xe1, 0x2e, 0xea, 0xd0, 0xe4, 0x73, 0x9f, 0x09, 0xd8, 0x66, + 0x39, 0xb8, 0x93, 0x76, 0x8a, 0xc1, 0x78, 0x53, 0xdc, 0x64, 0x89, 0xe0, 0x46, 0x16, 0xce, 0xea, + 0xca, 0xef, 0xbc, 0x18, 0x72, 0xe2, 0x8b, 0x50, 0xbc, 0x83, 0xda, 0x02, 0x0a, 0xf8, 0x62, 0x3b, + 0x34, 0xff, 0xad, 0xc3, 0xb4, 0xae, 0xfc, 0x76, 0x7c, 0x46, 0x89, 0x87, 0x40, 0xfc, 0x1c, 0xcd, + 0xd9, 0x9b, 0x6d, 0x0b, 0x5a, 0x48, 0x66, 0xee, 0x26, 0xdd, 0x96, 0x9d, 0xc5, 0xbc, 0xae, 0xfc, + 0xb9, 0x68, 0x24, 0x17, 0x5f, 0xa9, 0xc6, 0x1f, 0xd0, 0x94, 0x54, 0xe6, 0x7d, 0x64, 0x87, 0xee, + 0x7f, 0x76, 0x0f, 0xeb, 0xe6, 0x2f, 0xb1, 0x55, 0xc7, 0xfe, 0x54, 0xfe, 0xc3, 0x9b, 0xdf, 0x3e, + 0x59, 0x3f, 0x3b, 0x43, 0x3a, 0x58, 0x70, 0x2d, 0x8b, 0xcf, 0xa1, 0xf8, 0x19, 0x9a, 0x2d, 0x05, + 0xec, 0x82, 0x10, 0x90, 0x0e, 0xb6, 0xeb, 0xfe, 0x6f, 0xfb, 0xdc, 0xd5, 0x95, 0x3f, 0xfb, 0xee, + 0x72, 0x2a, 0x1e, 0xad, 0x0d, 0x5f, 0x1d, 0x9d, 0x7a, 0x8d, 0xe3, 0x53, 0xaf, 0x71, 0x72, 0xea, + 0x35, 0xbe, 0x6a, 0xcf, 0x39, 0xd2, 0x9e, 0x73, 0xac, 0x3d, 0xe7, 0x44, 0x7b, 0xce, 0x2f, 0xed, + 0x39, 0xdf, 0x7e, 0x7b, 0x8d, 0xf7, 0xfe, 0x2d, 0x1f, 0xc8, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x57, 0x93, 0xf3, 0xef, 0x42, 0x05, 0x00, 0x00, } func (m *Lease) Marshal() (dAtA []byte, err error) { @@ -285,6 +292,20 @@ func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreferredHolder != nil { + i -= len(*m.PreferredHolder) + copy(dAtA[i:], *m.PreferredHolder) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreferredHolder))) + i-- + dAtA[i] = 0x3a + } + if m.Strategy != nil { + i -= len(*m.Strategy) + copy(dAtA[i:], *m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Strategy))) + i-- + dAtA[i] = 0x32 + } if m.LeaseTransitions != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) i-- @@ -394,6 +415,14 @@ func (m *LeaseSpec) Size() (n int) { if m.LeaseTransitions != nil { n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) } + if m.Strategy != nil { + l = len(*m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreferredHolder != nil { + l = len(*m.PreferredHolder) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -440,6 +469,8 @@ func (this *LeaseSpec) String() string { `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + `Strategy:` + valueToStringGenerated(this.Strategy) + `,`, + `PreferredHolder:` + valueToStringGenerated(this.PreferredHolder) + `,`, `}`, }, "") return s @@ -859,6 +890,72 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } } m.LeaseTransitions = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]) + m.Strategy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredHolder", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PreferredHolder = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 92c8918b80..088811a74b 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.coordination.v1beta1; +import "k8s.io/api/coordination/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -32,7 +33,7 @@ option go_package = "k8s.io/api/coordination/v1beta1"; message Lease { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -45,7 +46,7 @@ message LeaseList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Lease items = 2; @@ -54,6 +55,8 @@ message LeaseList { // LeaseSpec is a specification of a Lease. message LeaseSpec { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional optional string holderIdentity = 1; @@ -65,16 +68,28 @@ message LeaseSpec { // acquireTime is a time when the current lease was acquired. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; // renewTime is a time when the current holder of a lease has last // updated the lease. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; // leaseTransitions is the number of transitions of a lease between // holders. // +optional optional int32 leaseTransitions = 5; + + // Strategy indicates the strategy for picking the leader for coordinated leader election + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string strategy = 6; + + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string preferredHolder = 7; } diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index 3a3d5f32e2..d63fc30a9e 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -42,6 +43,8 @@ type Lease struct { // LeaseSpec is a specification of a Lease. type LeaseSpec struct { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need @@ -60,6 +63,16 @@ type LeaseSpec struct { // holders. // +optional LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` + // Strategy indicates the strategy for picking the leader for coordinated leader election + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + Strategy *v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // +featureGate=CoordinatedLeaderElection + // +optional + PreferredHolder *string `json:"preferredHolder,omitempty" protobuf:"bytes,7,opt,name=preferredHolder"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index 78ca4e393f..50fe8ea189 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -49,11 +49,13 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", - "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.", "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", + "strategy": "Strategy indicates the strategy for picking the leader for coordinated leader election (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "preferredHolder": "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up.", } func (LeaseSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index 3adfd87203..dcef1e346a 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/coordination/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -111,6 +112,16 @@ func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { *out = new(int32) **out = **in } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(v1.CoordinatedLeaseStrategy) + **out = **in + } + if in.PreferredHolder != nil { + in, out := &in.PreferredHolder, &out.PreferredHolder + *out = new(string) + **out = **in + } return } diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go index 1bdf0b25b1..bc0041b331 100644 --- a/vendor/k8s.io/api/core/v1/doc.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -17,6 +17,8 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package +// +k8s:prerelease-lifecycle-gen=true +// +groupName= // Package v1 is the v1 version of the core API. 
package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index d52d8da189..5654ee4829 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -497,38 +497,10 @@ func (m *CinderVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo -func (m *ClaimSource) Reset() { *m = ClaimSource{} } -func (*ClaimSource) ProtoMessage() {} -func (*ClaimSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{16} -} -func (m *ClaimSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClaimSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClaimSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClaimSource.Merge(m, src) -} -func (m *ClaimSource) XXX_Size() int { - return m.Size() -} -func (m *ClaimSource) XXX_DiscardUnknown() { - xxx_messageInfo_ClaimSource.DiscardUnknown(m) -} - -var xxx_messageInfo_ClaimSource proto.InternalMessageInfo - func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } func (*ClientIPConfig) ProtoMessage() {} func (*ClientIPConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{17} + return fileDescriptor_6c07b07c062484ab, []int{16} } func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +528,7 @@ var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo func (m *ClusterTrustBundleProjection) Reset() { *m = ClusterTrustBundleProjection{} } func (*ClusterTrustBundleProjection) ProtoMessage() {} func (*ClusterTrustBundleProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{18} + return fileDescriptor_6c07b07c062484ab, []int{17} } func (m *ClusterTrustBundleProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +556,7 @@ var xxx_messageInfo_ClusterTrustBundleProjection proto.InternalMessageInfo func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} func (*ComponentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{19} + return fileDescriptor_6c07b07c062484ab, []int{18} } func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +584,7 @@ var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} func (*ComponentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{20} + return fileDescriptor_6c07b07c062484ab, []int{19} } func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +612,7 @@ var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} func (*ComponentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{21} + return fileDescriptor_6c07b07c062484ab, []int{20} } func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +640,7 @@ var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} func 
(*ConfigMap) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{22} + return fileDescriptor_6c07b07c062484ab, []int{21} } func (m *ConfigMap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +668,7 @@ var xxx_messageInfo_ConfigMap proto.InternalMessageInfo func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{23} + return fileDescriptor_6c07b07c062484ab, []int{22} } func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +696,7 @@ var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{24} + return fileDescriptor_6c07b07c062484ab, []int{23} } func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +724,7 @@ var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} func (*ConfigMapList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{25} + return fileDescriptor_6c07b07c062484ab, []int{24} } func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +752,7 @@ var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } func (*ConfigMapNodeConfigSource) ProtoMessage() {} func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{26} + return fileDescriptor_6c07b07c062484ab, []int{25} } func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +780,7 @@ var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} func (*ConfigMapProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{27} + return fileDescriptor_6c07b07c062484ab, []int{26} } func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +808,7 @@ var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{28} + return fileDescriptor_6c07b07c062484ab, []int{27} } func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +836,7 @@ var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{29} + return fileDescriptor_6c07b07c062484ab, []int{28} } func (m *Container) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +864,7 @@ var xxx_messageInfo_Container proto.InternalMessageInfo func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} func (*ContainerImage) Descriptor() ([]byte, []int) 
{ - return fileDescriptor_6c07b07c062484ab, []int{30} + return fileDescriptor_6c07b07c062484ab, []int{29} } func (m *ContainerImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +892,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} func (*ContainerPort) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{31} + return fileDescriptor_6c07b07c062484ab, []int{30} } func (m *ContainerPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +920,7 @@ var xxx_messageInfo_ContainerPort proto.InternalMessageInfo func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } func (*ContainerResizePolicy) ProtoMessage() {} func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{32} + return fileDescriptor_6c07b07c062484ab, []int{31} } func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +948,7 @@ var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{33} + return fileDescriptor_6c07b07c062484ab, []int{32} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +976,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{34} + return fileDescriptor_6c07b07c062484ab, []int{33} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1004,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{35} + return fileDescriptor_6c07b07c062484ab, []int{34} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1032,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{36} + return fileDescriptor_6c07b07c062484ab, []int{35} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1060,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{37} + return fileDescriptor_6c07b07c062484ab, []int{36} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1113,6 +1085,34 @@ func (m *ContainerStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo +func (m *ContainerUser) Reset() { *m = ContainerUser{} } +func (*ContainerUser) ProtoMessage() {} 
+func (*ContainerUser) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{37} +} +func (m *ContainerUser) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerUser) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerUser.Merge(m, src) +} +func (m *ContainerUser) XXX_Size() int { + return m.Size() +} +func (m *ContainerUser) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerUser.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerUser proto.InternalMessageInfo + func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) Descriptor() ([]byte, []int) { @@ -2149,10 +2149,38 @@ func (m *ISCSIVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo +func (m *ImageVolumeSource) Reset() { *m = ImageVolumeSource{} } +func (*ImageVolumeSource) ProtoMessage() {} +func (*ImageVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{75} +} +func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageVolumeSource.Merge(m, src) +} +func (m *ImageVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ImageVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ImageVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageVolumeSource proto.InternalMessageInfo + func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{75} + return fileDescriptor_6c07b07c062484ab, []int{76} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2208,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{76} + return fileDescriptor_6c07b07c062484ab, []int{77} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2236,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{77} + return fileDescriptor_6c07b07c062484ab, []int{78} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2264,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{78} + return fileDescriptor_6c07b07c062484ab, []int{79} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2292,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m 
= LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{79} + return fileDescriptor_6c07b07c062484ab, []int{80} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2320,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{80} + return fileDescriptor_6c07b07c062484ab, []int{81} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2320,7 +2348,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{81} + return fileDescriptor_6c07b07c062484ab, []int{82} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2345,10 +2373,38 @@ func (m *LimitRangeSpec) XXX_DiscardUnknown() { var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo +func (m *LinuxContainerUser) Reset() { *m = LinuxContainerUser{} } +func (*LinuxContainerUser) ProtoMessage() {} +func (*LinuxContainerUser) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{83} +} +func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LinuxContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LinuxContainerUser) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinuxContainerUser.Merge(m, src) +} +func (m *LinuxContainerUser) XXX_Size() int { + return m.Size() +} +func (m *LinuxContainerUser) XXX_DiscardUnknown() { + xxx_messageInfo_LinuxContainerUser.DiscardUnknown(m) +} + +var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo + func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{82} + return fileDescriptor_6c07b07c062484ab, []int{84} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 +2432,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{83} + return fileDescriptor_6c07b07c062484ab, []int{85} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2460,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{84} + return fileDescriptor_6c07b07c062484ab, []int{86} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2488,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func 
(*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{85} + return fileDescriptor_6c07b07c062484ab, []int{87} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2516,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{86} + return fileDescriptor_6c07b07c062484ab, []int{88} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2544,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo func (m *ModifyVolumeStatus) Reset() { *m = ModifyVolumeStatus{} } func (*ModifyVolumeStatus) ProtoMessage() {} func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{87} + return fileDescriptor_6c07b07c062484ab, []int{89} } func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2572,7 @@ var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{88} + return fileDescriptor_6c07b07c062484ab, []int{90} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2600,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{89} + return fileDescriptor_6c07b07c062484ab, []int{91} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2628,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{90} + return fileDescriptor_6c07b07c062484ab, []int{92} } func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2656,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{91} + return fileDescriptor_6c07b07c062484ab, []int{93} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2684,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{92} + return fileDescriptor_6c07b07c062484ab, []int{94} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2712,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{93} + return 
fileDescriptor_6c07b07c062484ab, []int{95} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2740,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{94} + return fileDescriptor_6c07b07c062484ab, []int{96} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2768,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{95} + return fileDescriptor_6c07b07c062484ab, []int{97} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2796,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{96} + return fileDescriptor_6c07b07c062484ab, []int{98} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2824,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{97} + return fileDescriptor_6c07b07c062484ab, []int{99} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2852,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{98} + return fileDescriptor_6c07b07c062484ab, []int{100} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2880,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{99} + return fileDescriptor_6c07b07c062484ab, []int{101} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +2908,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{100} + return fileDescriptor_6c07b07c062484ab, []int{102} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2877,10 +2933,38 @@ func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() { var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo +func (m *NodeFeatures) Reset() { *m = NodeFeatures{} } +func (*NodeFeatures) ProtoMessage() {} +func (*NodeFeatures) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{103} +} +func (m *NodeFeatures) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeFeatures) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeFeatures.Merge(m, src) +} +func (m *NodeFeatures) XXX_Size() int { + return m.Size() +} +func (m *NodeFeatures) XXX_DiscardUnknown() { + xxx_messageInfo_NodeFeatures.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo + func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{101} + return fileDescriptor_6c07b07c062484ab, []int{104} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +2992,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{102} + return fileDescriptor_6c07b07c062484ab, []int{105} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +3020,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo func (m *NodeRuntimeHandler) Reset() { *m = NodeRuntimeHandler{} } func (*NodeRuntimeHandler) ProtoMessage() {} func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{103} + return fileDescriptor_6c07b07c062484ab, []int{106} } func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +3048,7 @@ var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo func (m *NodeRuntimeHandlerFeatures) Reset() { *m = NodeRuntimeHandlerFeatures{} } func (*NodeRuntimeHandlerFeatures) ProtoMessage() {} func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{104} + return fileDescriptor_6c07b07c062484ab, []int{107} } func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +3076,7 @@ var xxx_messageInfo_NodeRuntimeHandlerFeatures proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{105} + return fileDescriptor_6c07b07c062484ab, []int{108} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3104,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{106} + return fileDescriptor_6c07b07c062484ab, []int{109} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3132,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{107} + return fileDescriptor_6c07b07c062484ab, []int{110} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3160,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m 
*NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{108} + return fileDescriptor_6c07b07c062484ab, []int{111} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3188,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{109} + return fileDescriptor_6c07b07c062484ab, []int{112} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3216,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{110} + return fileDescriptor_6c07b07c062484ab, []int{113} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3244,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{111} + return fileDescriptor_6c07b07c062484ab, []int{114} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3272,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{112} + return fileDescriptor_6c07b07c062484ab, []int{115} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3300,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{113} + return fileDescriptor_6c07b07c062484ab, []int{116} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3328,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{114} + return fileDescriptor_6c07b07c062484ab, []int{117} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{115} + return fileDescriptor_6c07b07c062484ab, []int{118} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() 
{ *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{116} + return fileDescriptor_6c07b07c062484ab, []int{119} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{117} + return fileDescriptor_6c07b07c062484ab, []int{120} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{118} + return fileDescriptor_6c07b07c062484ab, []int{121} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3468,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{119} + return fileDescriptor_6c07b07c062484ab, []int{122} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{120} + return fileDescriptor_6c07b07c062484ab, []int{123} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{121} + return fileDescriptor_6c07b07c062484ab, []int{124} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{122} + return fileDescriptor_6c07b07c062484ab, []int{125} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func 
(*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{123} + return fileDescriptor_6c07b07c062484ab, []int{126} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{124} + return fileDescriptor_6c07b07c062484ab, []int{127} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{125} + return fileDescriptor_6c07b07c062484ab, []int{128} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3664,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{126} + return fileDescriptor_6c07b07c062484ab, []int{129} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3692,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{127} + return fileDescriptor_6c07b07c062484ab, []int{130} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3720,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{128} + return fileDescriptor_6c07b07c062484ab, []int{131} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3748,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{129} + return fileDescriptor_6c07b07c062484ab, []int{132} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3776,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{130} + return fileDescriptor_6c07b07c062484ab, []int{133} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3804,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - 
return fileDescriptor_6c07b07c062484ab, []int{131} + return fileDescriptor_6c07b07c062484ab, []int{134} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3832,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{132} + return fileDescriptor_6c07b07c062484ab, []int{135} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3860,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{133} + return fileDescriptor_6c07b07c062484ab, []int{136} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3888,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{134} + return fileDescriptor_6c07b07c062484ab, []int{137} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3832,7 +3916,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{135} + return fileDescriptor_6c07b07c062484ab, []int{138} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +3944,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{136} + return fileDescriptor_6c07b07c062484ab, []int{139} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +3972,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{137} + return fileDescriptor_6c07b07c062484ab, []int{140} } func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +4000,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo func (m *PodOS) Reset() { *m = PodOS{} } func (*PodOS) ProtoMessage() {} func (*PodOS) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{138} + return fileDescriptor_6c07b07c062484ab, []int{141} } func (m *PodOS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +4028,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{139} + return fileDescriptor_6c07b07c062484ab, []int{142} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4056,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo 
func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{140} + return fileDescriptor_6c07b07c062484ab, []int{143} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4084,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{141} + return fileDescriptor_6c07b07c062484ab, []int{144} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4112,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } func (*PodResourceClaim) ProtoMessage() {} func (*PodResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{142} + return fileDescriptor_6c07b07c062484ab, []int{145} } func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4140,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} } func (*PodResourceClaimStatus) ProtoMessage() {} func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{143} + return fileDescriptor_6c07b07c062484ab, []int{146} } func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4168,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } func (*PodSchedulingGate) ProtoMessage() {} func (*PodSchedulingGate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{144} + return fileDescriptor_6c07b07c062484ab, []int{147} } func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4196,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{145} + return fileDescriptor_6c07b07c062484ab, []int{148} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4224,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{146} + return fileDescriptor_6c07b07c062484ab, []int{149} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4252,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{147} + return fileDescriptor_6c07b07c062484ab, []int{150} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4280,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func 
(*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{148} + return fileDescriptor_6c07b07c062484ab, []int{151} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4308,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{149} + return fileDescriptor_6c07b07c062484ab, []int{152} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4336,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{150} + return fileDescriptor_6c07b07c062484ab, []int{153} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4364,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{151} + return fileDescriptor_6c07b07c062484ab, []int{154} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4392,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{152} + return fileDescriptor_6c07b07c062484ab, []int{155} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4420,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{153} + return fileDescriptor_6c07b07c062484ab, []int{156} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4448,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{154} + return fileDescriptor_6c07b07c062484ab, []int{157} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4476,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{155} + return fileDescriptor_6c07b07c062484ab, []int{158} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4504,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{156} + return fileDescriptor_6c07b07c062484ab, 
[]int{159} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4532,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{157} + return fileDescriptor_6c07b07c062484ab, []int{160} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4560,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{158} + return fileDescriptor_6c07b07c062484ab, []int{161} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4588,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{159} + return fileDescriptor_6c07b07c062484ab, []int{162} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4616,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{160} + return fileDescriptor_6c07b07c062484ab, []int{163} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4644,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{161} + return fileDescriptor_6c07b07c062484ab, []int{164} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4672,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{162} + return fileDescriptor_6c07b07c062484ab, []int{165} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4700,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{163} + return fileDescriptor_6c07b07c062484ab, []int{166} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4728,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{164} + return fileDescriptor_6c07b07c062484ab, []int{167} } 
func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4756,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{165} + return fileDescriptor_6c07b07c062484ab, []int{168} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4784,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{166} + return fileDescriptor_6c07b07c062484ab, []int{169} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4812,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{167} + return fileDescriptor_6c07b07c062484ab, []int{170} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4840,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{168} + return fileDescriptor_6c07b07c062484ab, []int{171} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4868,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{169} + return fileDescriptor_6c07b07c062484ab, []int{172} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4896,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{170} + return fileDescriptor_6c07b07c062484ab, []int{173} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4924,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{171} + return fileDescriptor_6c07b07c062484ab, []int{174} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4865,10 +4949,38 @@ func (m *ResourceFieldSelector) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceFieldSelector 
proto.InternalMessageInfo +func (m *ResourceHealth) Reset() { *m = ResourceHealth{} } +func (*ResourceHealth) ProtoMessage() {} +func (*ResourceHealth) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{175} +} +func (m *ResourceHealth) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceHealth) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceHealth.Merge(m, src) +} +func (m *ResourceHealth) XXX_Size() int { + return m.Size() +} +func (m *ResourceHealth) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceHealth.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo + func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{172} + return fileDescriptor_6c07b07c062484ab, []int{176} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +5008,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{173} + return fileDescriptor_6c07b07c062484ab, []int{177} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +5036,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{174} + return fileDescriptor_6c07b07c062484ab, []int{178} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +5064,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{175} + return fileDescriptor_6c07b07c062484ab, []int{179} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5092,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{176} + return fileDescriptor_6c07b07c062484ab, []int{180} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5005,10 +5117,38 @@ func (m *ResourceRequirements) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo +func (m *ResourceStatus) Reset() { *m = ResourceStatus{} } +func (*ResourceStatus) ProtoMessage() {} +func (*ResourceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{181} +} +func (m *ResourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceStatus.Merge(m, src) +} +func (m *ResourceStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo + func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{177} + return fileDescriptor_6c07b07c062484ab, []int{182} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5176,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{178} + return fileDescriptor_6c07b07c062484ab, []int{183} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5204,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{179} + return fileDescriptor_6c07b07c062484ab, []int{184} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5232,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{180} + return fileDescriptor_6c07b07c062484ab, []int{185} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5260,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{181} + return fileDescriptor_6c07b07c062484ab, []int{186} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5288,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{182} + return fileDescriptor_6c07b07c062484ab, []int{187} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5316,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{183} + return fileDescriptor_6c07b07c062484ab, []int{188} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5344,7 @@ var xxx_messageInfo_Secret 
proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{184} + return fileDescriptor_6c07b07c062484ab, []int{189} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5372,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{185} + return fileDescriptor_6c07b07c062484ab, []int{190} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5400,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{186} + return fileDescriptor_6c07b07c062484ab, []int{191} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5428,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{187} + return fileDescriptor_6c07b07c062484ab, []int{192} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5456,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{188} + return fileDescriptor_6c07b07c062484ab, []int{193} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5484,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{189} + return fileDescriptor_6c07b07c062484ab, []int{194} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5512,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{190} + return fileDescriptor_6c07b07c062484ab, []int{195} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5540,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{191} + return fileDescriptor_6c07b07c062484ab, []int{196} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5568,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func 
(*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{192} + return fileDescriptor_6c07b07c062484ab, []int{197} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5596,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{193} + return fileDescriptor_6c07b07c062484ab, []int{198} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5624,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{194} + return fileDescriptor_6c07b07c062484ab, []int{199} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5652,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{195} + return fileDescriptor_6c07b07c062484ab, []int{200} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5680,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{196} + return fileDescriptor_6c07b07c062484ab, []int{201} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5708,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{197} + return fileDescriptor_6c07b07c062484ab, []int{202} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5736,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{198} + return fileDescriptor_6c07b07c062484ab, []int{203} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5764,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{199} + return fileDescriptor_6c07b07c062484ab, []int{204} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5792,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return 
fileDescriptor_6c07b07c062484ab, []int{200} + return fileDescriptor_6c07b07c062484ab, []int{205} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5820,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{201} + return fileDescriptor_6c07b07c062484ab, []int{206} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5848,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *SleepAction) Reset() { *m = SleepAction{} } func (*SleepAction) ProtoMessage() {} func (*SleepAction) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{202} + return fileDescriptor_6c07b07c062484ab, []int{207} } func (m *SleepAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5876,7 @@ var xxx_messageInfo_SleepAction proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{203} + return fileDescriptor_6c07b07c062484ab, []int{208} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5904,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{204} + return fileDescriptor_6c07b07c062484ab, []int{209} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5932,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{205} + return fileDescriptor_6c07b07c062484ab, []int{210} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5960,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{206} + return fileDescriptor_6c07b07c062484ab, []int{211} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +5988,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{207} + return fileDescriptor_6c07b07c062484ab, []int{212} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5876,7 +6016,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{208} + return fileDescriptor_6c07b07c062484ab, []int{213} } func (m 
*Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5904,7 +6044,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{209} + return fileDescriptor_6c07b07c062484ab, []int{214} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,7 +6072,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{210} + return fileDescriptor_6c07b07c062484ab, []int{215} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5960,7 +6100,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{211} + return fileDescriptor_6c07b07c062484ab, []int{216} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5988,7 +6128,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{212} + return fileDescriptor_6c07b07c062484ab, []int{217} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6016,7 +6156,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } func (*TypedObjectReference) ProtoMessage() {} func (*TypedObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{213} + return fileDescriptor_6c07b07c062484ab, []int{218} } func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6044,7 +6184,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{214} + return fileDescriptor_6c07b07c062484ab, []int{219} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6072,7 +6212,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{215} + return fileDescriptor_6c07b07c062484ab, []int{220} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6100,7 +6240,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{216} 
+ return fileDescriptor_6c07b07c062484ab, []int{221} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6128,7 +6268,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeMountStatus) Reset() { *m = VolumeMountStatus{} } func (*VolumeMountStatus) ProtoMessage() {} func (*VolumeMountStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{217} + return fileDescriptor_6c07b07c062484ab, []int{222} } func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6156,7 +6296,7 @@ var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{218} + return fileDescriptor_6c07b07c062484ab, []int{223} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6184,7 +6324,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{219} + return fileDescriptor_6c07b07c062484ab, []int{224} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6212,7 +6352,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} } func (*VolumeResourceRequirements) ProtoMessage() {} func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{220} + return fileDescriptor_6c07b07c062484ab, []int{225} } func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6240,7 +6380,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{221} + return fileDescriptor_6c07b07c062484ab, []int{226} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6268,7 +6408,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{222} + return fileDescriptor_6c07b07c062484ab, []int{227} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6296,7 +6436,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{223} + return fileDescriptor_6c07b07c062484ab, []int{228} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6324,7 +6464,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func 
(*WindowsSecurityContextOptions) ProtoMessage() {}
 func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{224}
+	return fileDescriptor_6c07b07c062484ab, []int{229}
 }
 func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6368,7 +6508,6 @@ func init() {
 	proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource")
 	proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource")
 	proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource")
-	proto.RegisterType((*ClaimSource)(nil), "k8s.io.api.core.v1.ClaimSource")
 	proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig")
 	proto.RegisterType((*ClusterTrustBundleProjection)(nil), "k8s.io.api.core.v1.ClusterTrustBundleProjection")
 	proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition")
@@ -6393,6 +6532,7 @@ func init() {
 	proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting")
 	proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus")
 	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ContainerStatus.AllocatedResourcesEntry")
+	proto.RegisterType((*ContainerUser)(nil), "k8s.io.api.core.v1.ContainerUser")
 	proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint")
 	proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection")
 	proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile")
@@ -6432,6 +6572,7 @@ func init() {
 	proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource")
 	proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource")
 	proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource")
+	proto.RegisterType((*ImageVolumeSource)(nil), "k8s.io.api.core.v1.ImageVolumeSource")
 	proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath")
 	proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle")
 	proto.RegisterType((*LifecycleHandler)(nil), "k8s.io.api.core.v1.LifecycleHandler")
@@ -6444,6 +6585,7 @@ func init() {
 	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry")
 	proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList")
 	proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec")
+	proto.RegisterType((*LinuxContainerUser)(nil), "k8s.io.api.core.v1.LinuxContainerUser")
 	proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List")
 	proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress")
 	proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus")
@@ -6463,6 +6605,7 @@ func init() {
 	proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource")
 	proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus")
 	proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints")
+	proto.RegisterType((*NodeFeatures)(nil), "k8s.io.api.core.v1.NodeFeatures")
 	proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList")
 	proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions")
 	proto.RegisterType((*NodeRuntimeHandler)(nil), "k8s.io.api.core.v1.NodeRuntimeHandler")
@@ -6543,6 +6686,7 @@ func init() {
 	proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus")
 	proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.core.v1.ResourceClaim")
 	proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector")
+	proto.RegisterType((*ResourceHealth)(nil), "k8s.io.api.core.v1.ResourceHealth")
 	proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota")
 	proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList")
 	proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec")
@@ -6553,6 +6697,7 @@ func init() {
 	proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements")
 	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry")
 	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.RequestsEntry")
+	proto.RegisterType((*ResourceStatus)(nil), "k8s.io.api.core.v1.ResourceStatus")
 	proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions")
 	proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource")
 	proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource")
@@ -6613,989 +6758,1011 @@ func init() {
 }
 
 var fileDescriptor_6c07b07c062484ab = []byte{
-	// 15708 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x59, 0x8c, 0x1c, 0xd9,
	[... remaining -/+ byte literals of the gzipped FileDescriptorProto omitted: opaque machine-generated descriptor data, not meaningfully reviewable by hand ...]
0x8c, 0x50, 0x03, 0xa6, 0x3c, 0x3f, 0x62, 0xa7, 0xba, 0xc2, 0x77, 0xb2, 0x95, 0x26, 0xb9, - 0x9f, 0x14, 0xdc, 0xa7, 0xd6, 0x93, 0x5c, 0x70, 0x9a, 0x31, 0x5a, 0x96, 0xca, 0xce, 0x62, 0xbe, - 0x96, 0x4a, 0x1f, 0xb8, 0x6c, 0x5d, 0xa7, 0xfd, 0x1b, 0x16, 0x0c, 0x4b, 0xb2, 0xa3, 0x30, 0x8b, - 0xaf, 0xc1, 0x60, 0xc8, 0x06, 0x41, 0x76, 0x8d, 0xdd, 0xa9, 0xe1, 0x7c, 0xbc, 0x62, 0x61, 0x85, - 0xff, 0x0f, 0xb1, 0xe4, 0xc1, 0x6c, 0x5d, 0xaa, 0xf9, 0xef, 0x11, 0x5b, 0x97, 0x6a, 0x4f, 0xce, - 0xa1, 0xf4, 0x97, 0xac, 0xcd, 0x9a, 0xf2, 0x98, 0xca, 0xd4, 0xad, 0x80, 0x6c, 0xba, 0xb7, 0x93, - 0x32, 0x75, 0x85, 0x41, 0xb1, 0xc0, 0xa2, 0xb7, 0x61, 0xb4, 0x26, 0x8d, 0x1c, 0xf1, 0x0a, 0x3f, - 0xd7, 0xd1, 0xe0, 0xa6, 0x6c, 0xb3, 0x5c, 0x49, 0xb7, 0xa8, 0x95, 0xc7, 0x06, 0x37, 0xd3, 0xe5, - 0xaa, 0xd8, 0xcd, 0xe5, 0x2a, 0xe6, 0x9b, 0xef, 0x80, 0xf4, 0x93, 0x16, 0x0c, 0x70, 0xe5, 0x76, - 0x6f, 0xb6, 0x05, 0xcd, 0x54, 0x1d, 0xf7, 0xdd, 0x75, 0x0a, 0x14, 0x92, 0x06, 0x5a, 0x83, 0x61, - 0xf6, 0x83, 0x29, 0xe7, 0x8b, 0xf9, 0x0f, 0xb4, 0x78, 0xad, 0x7a, 0x03, 0xaf, 0xcb, 0x62, 0x38, - 0xe6, 0x60, 0xff, 0x58, 0x91, 0xee, 0x6e, 0x31, 0xa9, 0x71, 0xe8, 0x5b, 0x0f, 0xee, 0xd0, 0x2f, - 0x3c, 0xa8, 0x43, 0x7f, 0x0b, 0x26, 0x6a, 0x9a, 0x61, 0x3b, 0x1e, 0xc9, 0xf3, 0x1d, 0x27, 0x89, - 0x66, 0x03, 0xe7, 0xea, 0xbf, 0x45, 0x93, 0x09, 0x4e, 0x72, 0x45, 0x9f, 0x80, 0x51, 0x3e, 0xce, - 0xa2, 0x16, 0xee, 0xb5, 0xf6, 0x78, 0xfe, 0x7c, 0xd1, 0xab, 0xe0, 0xea, 0x62, 0xad, 0x38, 0x36, - 0x98, 0xd9, 0x7f, 0x6b, 0x01, 0x5a, 0x6e, 0x6d, 0x93, 0x26, 0x09, 0x9c, 0x46, 0x6c, 0x9f, 0xfa, - 0xa2, 0x05, 0x33, 0x24, 0x05, 0x5e, 0xf4, 0x9b, 0x4d, 0x71, 0x1b, 0xcd, 0x51, 0x98, 0x2c, 0xe7, - 0x94, 0x51, 0x4f, 0xc6, 0x66, 0xf2, 0x28, 0x70, 0x6e, 0x7d, 0x68, 0x0d, 0xa6, 0xf9, 0x29, 0xa9, - 0x10, 0x9a, 0x9b, 0xd8, 0xc3, 0x82, 0xf1, 0xf4, 0x46, 0x9a, 0x04, 0x67, 0x95, 0xb3, 0x7f, 0x63, - 0x0c, 0x72, 0x5b, 0xf1, 0xbe, 0x61, 0xee, 0x7d, 0xc3, 0xdc, 0xfb, 0x86, 0xb9, 0xf7, 0x0d, 0x73, - 0xef, 0x1b, 0xe6, 0xde, 0x37, 0xcc, 0xbd, 0x47, 0x0d, 0x73, 0xff, 0x9b, 0x05, 0xc7, 0xd5, 0xf1, - 0x65, 0x5c, 0xd8, 0x3f, 0x0b, 0xd3, 0x7c, 0xb9, 0x19, 0xde, 0xde, 0xe2, 0xb8, 0xbe, 0x98, 0x39, - 0x73, 0x13, 0xaf, 0x12, 0x8c, 0x82, 0xfc, 0x79, 0x57, 0x06, 0x02, 0x67, 0x55, 0x63, 0xff, 0xca, - 0x10, 0xf4, 0x2f, 0xef, 0x12, 0x2f, 0x3a, 0x82, 0xab, 0x4d, 0x0d, 0xc6, 0x5d, 0x6f, 0xd7, 0x6f, - 0xec, 0x92, 0x3a, 0xc7, 0x1f, 0xe6, 0x06, 0x7e, 0x42, 0xb0, 0x1e, 0x2f, 0x1b, 0x2c, 0x70, 0x82, - 0xe5, 0x83, 0x30, 0x6f, 0x5c, 0x82, 0x01, 0x7e, 0xf8, 0x08, 0xdb, 0x46, 0xe6, 0x9e, 0xcd, 0x3a, - 0x51, 0x1c, 0xa9, 0xb1, 0xe9, 0x85, 0x1f, 0x6e, 0xa2, 0x38, 0xfa, 0x0c, 0x8c, 0x6f, 0xba, 0x41, - 0x18, 0x6d, 0xb8, 0x4d, 0x7a, 0x34, 0x34, 0x5b, 0xf7, 0x60, 0xce, 0x50, 0xfd, 0xb0, 0x62, 0x70, - 0xc2, 0x09, 0xce, 0x68, 0x0b, 0xc6, 0x1a, 0x8e, 0x5e, 0xd5, 0xe0, 0xa1, 0xab, 0x52, 0xa7, 0xc3, - 0xaa, 0xce, 0x08, 0x9b, 0x7c, 0xe9, 0x72, 0xaa, 0x31, 0x8d, 0xfc, 0x10, 0x53, 0x67, 0xa8, 0xe5, - 0xc4, 0x55, 0xf1, 0x1c, 0x47, 0x05, 0x34, 0xe6, 0x29, 0x3f, 0x6c, 0x0a, 0x68, 0x9a, 0x3f, 0xfc, - 0xa7, 0x61, 0x98, 0xd0, 0x2e, 0xa4, 0x8c, 0xc5, 0x01, 0x73, 0xa1, 0xb7, 0xb6, 0xae, 0xb9, 0xb5, - 0xc0, 0x37, 0x0d, 0x49, 0xcb, 0x92, 0x13, 0x8e, 0x99, 0xa2, 0x45, 0x18, 0x08, 0x49, 0xe0, 0x2a, - 0x65, 0x75, 0x87, 0x61, 0x64, 0x64, 0xfc, 0xfd, 0x20, 0xff, 0x8d, 0x45, 0x51, 0x3a, 0xbd, 0x1c, - 0xa6, 0x8a, 0x65, 0x87, 0x81, 0x36, 0xbd, 0xe6, 0x19, 0x14, 0x0b, 0x2c, 0x7a, 0x03, 0x06, 0x03, - 0xd2, 0x60, 0x96, 0xca, 0xb1, 0xde, 0x27, 0x39, 0x37, 0x7c, 0xf2, 0x72, 0x58, 0x32, 0x40, 0x57, - 0x00, 0x05, 0x84, 0x0a, 0x78, 
0xae, 0xb7, 0xa5, 0xfc, 0xc7, 0xc5, 0x46, 0xab, 0x04, 0x69, 0x1c, - 0x53, 0xc8, 0xa7, 0xa3, 0x38, 0xa3, 0x18, 0xba, 0x04, 0x53, 0x0a, 0x5a, 0xf6, 0xc2, 0xc8, 0xa1, - 0x1b, 0xdc, 0x04, 0xe3, 0xa5, 0xf4, 0x2b, 0x38, 0x49, 0x80, 0xd3, 0x65, 0xec, 0x9f, 0xb7, 0x80, - 0xf7, 0xf3, 0x11, 0x68, 0x15, 0x5e, 0x37, 0xb5, 0x0a, 0x27, 0x73, 0x47, 0x2e, 0x47, 0xa3, 0xf0, - 0xf3, 0x16, 0x8c, 0x68, 0x23, 0x1b, 0xcf, 0x59, 0xab, 0xc3, 0x9c, 0x6d, 0xc3, 0x24, 0x9d, 0xe9, - 0x57, 0x6f, 0x86, 0x24, 0xd8, 0x25, 0x75, 0x36, 0x31, 0x0b, 0xf7, 0x36, 0x31, 0x95, 0xaf, 0xea, - 0x6a, 0x82, 0x21, 0x4e, 0x55, 0x61, 0x7f, 0x5a, 0x36, 0x55, 0xb9, 0xf6, 0xd6, 0xd4, 0x98, 0x27, - 0x5c, 0x7b, 0xd5, 0xa8, 0xe2, 0x98, 0x86, 0x2e, 0xb5, 0x6d, 0x3f, 0x8c, 0x92, 0xae, 0xbd, 0x97, - 0xfd, 0x30, 0xc2, 0x0c, 0x63, 0x3f, 0x0f, 0xb0, 0x7c, 0x9b, 0xd4, 0xf8, 0x8c, 0xd5, 0x2f, 0x3d, - 0x56, 0xfe, 0xa5, 0xc7, 0xfe, 0x23, 0x0b, 0xc6, 0x57, 0x16, 0x8d, 0x93, 0x6b, 0x0e, 0x80, 0xdf, - 0xd4, 0x6e, 0xdc, 0x58, 0x97, 0xfe, 0x25, 0xdc, 0xc4, 0xae, 0xa0, 0x58, 0xa3, 0x40, 0x27, 0xa1, - 0xd8, 0x68, 0x7b, 0x42, 0xed, 0x39, 0x48, 0x8f, 0xc7, 0xd5, 0xb6, 0x87, 0x29, 0x4c, 0x7b, 0x36, - 0x56, 0xec, 0xf9, 0xd9, 0x58, 0xd7, 0xe8, 0x35, 0xa8, 0x04, 0xfd, 0xb7, 0x6e, 0xb9, 0x75, 0xfe, - 0x28, 0x5f, 0xf8, 0xbe, 0xdc, 0xb8, 0x51, 0x5e, 0x0a, 0x31, 0x87, 0xdb, 0x5f, 0x2a, 0xc2, 0xec, - 0x4a, 0x83, 0xdc, 0x7e, 0x97, 0x81, 0x09, 0x7a, 0x7d, 0xf4, 0x76, 0x38, 0x05, 0xd2, 0x61, 0x1f, - 0x36, 0x76, 0xef, 0x8f, 0x4d, 0x18, 0xe4, 0x9e, 0xad, 0x32, 0x4c, 0x41, 0xa6, 0x3d, 0x31, 0xbf, - 0x43, 0xe6, 0xb8, 0x87, 0xac, 0xb0, 0x27, 0xaa, 0x03, 0x53, 0x40, 0xb1, 0x64, 0x3e, 0xfb, 0x0a, - 0x8c, 0xea, 0x94, 0x87, 0x7a, 0x62, 0xfc, 0xfd, 0x45, 0x98, 0xa4, 0x2d, 0x78, 0xa0, 0x03, 0x71, - 0x2d, 0x3d, 0x10, 0xf7, 0xfb, 0x99, 0x69, 0xf7, 0xd1, 0x78, 0x3b, 0x39, 0x1a, 0x17, 0xf3, 0x46, - 0xe3, 0xa8, 0xc7, 0xe0, 0x07, 0x2c, 0x98, 0x5e, 0x69, 0xf8, 0xb5, 0x9d, 0xc4, 0x53, 0xd0, 0x17, - 0x61, 0x84, 0x6e, 0xc7, 0xa1, 0x11, 0x15, 0xc5, 0x88, 0x93, 0x23, 0x50, 0x58, 0xa7, 0xd3, 0x8a, - 0x5d, 0xbb, 0x56, 0x5e, 0xca, 0x0a, 0xaf, 0x23, 0x50, 0x58, 0xa7, 0xb3, 0xff, 0xc0, 0x82, 0xd3, - 0x97, 0x16, 0x97, 0xe3, 0xa9, 0x98, 0x8a, 0xf0, 0x73, 0x0e, 0x06, 0x5a, 0x75, 0xad, 0x29, 0xb1, - 0x5a, 0x78, 0x89, 0xb5, 0x42, 0x60, 0xdf, 0x2b, 0xc1, 0xb4, 0xae, 0x01, 0x5c, 0xc2, 0x95, 0x45, - 0xb1, 0xef, 0x4a, 0x2b, 0x90, 0x95, 0x6b, 0x05, 0x7a, 0x1c, 0x06, 0xe9, 0xb9, 0xe0, 0xd6, 0x64, - 0xbb, 0xb9, 0xc7, 0x00, 0x07, 0x61, 0x89, 0xb3, 0x7f, 0xce, 0x82, 0xe9, 0x4b, 0x6e, 0x44, 0x0f, - 0xed, 0x64, 0x08, 0x1b, 0x7a, 0x6a, 0x87, 0x6e, 0xe4, 0x07, 0x7b, 0xc9, 0x10, 0x36, 0x58, 0x61, - 0xb0, 0x46, 0xc5, 0x3f, 0x68, 0xd7, 0x65, 0x4f, 0x35, 0x0a, 0xa6, 0xdd, 0x0d, 0x0b, 0x38, 0x56, - 0x14, 0xb4, 0xbf, 0xea, 0x6e, 0xc0, 0x54, 0x96, 0x7b, 0x62, 0xe3, 0x56, 0xfd, 0xb5, 0x24, 0x11, - 0x38, 0xa6, 0xb1, 0xff, 0xda, 0x82, 0xd2, 0x25, 0xfe, 0xe0, 0x74, 0x33, 0xcc, 0xd9, 0x74, 0x9f, - 0x87, 0x61, 0x22, 0x0d, 0x04, 0xf2, 0xf1, 0xad, 0x14, 0x44, 0x95, 0xe5, 0x80, 0x47, 0xd2, 0x51, - 0x74, 0x3d, 0xbc, 0x57, 0x3f, 0xdc, 0x83, 0xe3, 0x15, 0x40, 0x44, 0xaf, 0x4b, 0x0f, 0x2d, 0xc4, - 0x62, 0x94, 0x2c, 0xa7, 0xb0, 0x38, 0xa3, 0x84, 0xfd, 0x13, 0x16, 0x1c, 0x57, 0x1f, 0xfc, 0x9e, - 0xfb, 0x4c, 0xfb, 0x6b, 0x05, 0x18, 0xbb, 0xbc, 0xb1, 0x51, 0xb9, 0x44, 0x22, 0x6d, 0x56, 0x76, - 0x36, 0xfb, 0x63, 0xcd, 0x7a, 0xd9, 0xe9, 0x8e, 0xd8, 0x8e, 0xdc, 0xc6, 0x1c, 0x0f, 0x98, 0x37, - 0x57, 0xf6, 0xa2, 0xab, 0x41, 0x35, 0x0a, 0x5c, 0x6f, 0x2b, 0x73, 0xa6, 0x4b, 0x99, 0xa5, 0x98, - 0x27, 0xb3, 0xa0, 0xe7, 0x61, 0x80, 0x45, 0xec, 0x93, 
0x83, 0xf0, 0xb0, 0xba, 0x62, 0x31, 0xe8, - 0xc1, 0x7e, 0x69, 0xf8, 0x1a, 0x2e, 0xf3, 0x3f, 0x58, 0x90, 0xa2, 0x6b, 0x30, 0xb2, 0x1d, 0x45, - 0xad, 0xcb, 0xc4, 0xa9, 0x93, 0x40, 0xee, 0xb2, 0x67, 0xb2, 0x76, 0x59, 0xda, 0x09, 0x9c, 0x2c, - 0xde, 0x98, 0x62, 0x58, 0x88, 0x75, 0x3e, 0x76, 0x15, 0x20, 0xc6, 0xdd, 0x27, 0xc3, 0x8d, 0xbd, - 0x01, 0xc3, 0xf4, 0x73, 0xe7, 0x1b, 0xae, 0xd3, 0xd9, 0x34, 0xfe, 0x34, 0x0c, 0x4b, 0xc3, 0x77, - 0x28, 0xe2, 0x69, 0xb0, 0x13, 0x49, 0xda, 0xc5, 0x43, 0x1c, 0xe3, 0xed, 0xc7, 0x40, 0x38, 0xaf, - 0x76, 0x62, 0x69, 0x6f, 0xc2, 0x31, 0xe6, 0x85, 0xeb, 0x44, 0xdb, 0xc6, 0x1c, 0xed, 0x3e, 0x19, - 0x9e, 0x11, 0xf7, 0x3a, 0xfe, 0x65, 0x33, 0xda, 0xeb, 0xe7, 0x51, 0xc9, 0x31, 0xbe, 0xe3, 0xd9, - 0x7f, 0xd5, 0x07, 0x0f, 0x97, 0xab, 0xf9, 0x81, 0xa0, 0x5e, 0x86, 0x51, 0x2e, 0x2e, 0xd2, 0xa9, - 0xe1, 0x34, 0x44, 0xbd, 0x4a, 0x03, 0xba, 0xa1, 0xe1, 0xb0, 0x41, 0x89, 0x4e, 0x43, 0xd1, 0x7d, - 0xc7, 0x4b, 0xbe, 0x0d, 0x2c, 0xbf, 0xb9, 0x8e, 0x29, 0x9c, 0xa2, 0xa9, 0xe4, 0xc9, 0xb7, 0x74, - 0x85, 0x56, 0xd2, 0xe7, 0xeb, 0x30, 0xee, 0x86, 0xb5, 0xd0, 0x2d, 0x7b, 0x74, 0x9d, 0x6a, 0x2b, - 0x5d, 0xe9, 0x1c, 0x68, 0xa3, 0x15, 0x16, 0x27, 0xa8, 0xb5, 0xf3, 0xa5, 0xbf, 0x67, 0xe9, 0xb5, - 0x6b, 0x18, 0x0a, 0xba, 0xfd, 0xb7, 0xd8, 0xd7, 0x85, 0x4c, 0x05, 0x2f, 0xb6, 0x7f, 0xfe, 0xc1, - 0x21, 0x96, 0x38, 0x7a, 0xa1, 0xab, 0x6d, 0x3b, 0xad, 0xf9, 0x76, 0xb4, 0xbd, 0xe4, 0x86, 0x35, - 0x7f, 0x97, 0x04, 0x7b, 0xec, 0x2e, 0x3e, 0x14, 0x5f, 0xe8, 0x14, 0x62, 0xf1, 0xf2, 0x7c, 0x85, - 0x52, 0xe2, 0x74, 0x19, 0x34, 0x0f, 0x13, 0x12, 0x58, 0x25, 0x21, 0x3b, 0x02, 0x46, 0x18, 0x1b, - 0xf5, 0x5a, 0x4f, 0x80, 0x15, 0x93, 0x24, 0xbd, 0x29, 0xe0, 0xc2, 0xfd, 0x10, 0x70, 0x5f, 0x82, - 0x31, 0xd7, 0x73, 0x23, 0xd7, 0x89, 0x7c, 0x6e, 0x3f, 0xe2, 0xd7, 0x6e, 0xa6, 0x60, 0x2e, 0xeb, - 0x08, 0x6c, 0xd2, 0xd9, 0xff, 0xa1, 0x0f, 0xa6, 0xd8, 0xb0, 0xbd, 0x3f, 0xc3, 0xbe, 0x93, 0x66, - 0xd8, 0xb5, 0xf4, 0x0c, 0xbb, 0x1f, 0x92, 0xfb, 0x3d, 0x4f, 0xb3, 0xcf, 0xc0, 0xb0, 0x7a, 0xa0, - 0x28, 0x5f, 0x28, 0x5b, 0x39, 0x2f, 0x94, 0xbb, 0x9f, 0xde, 0xd2, 0x25, 0xad, 0x98, 0xe9, 0x92, - 0xf6, 0x15, 0x0b, 0x62, 0xc3, 0x02, 0x7a, 0x13, 0x86, 0x5b, 0x3e, 0x73, 0xa1, 0x0d, 0xa4, 0x5f, - 0xfa, 0x63, 0x1d, 0x2d, 0x13, 0x3c, 0x16, 0x5e, 0xc0, 0x7b, 0xa1, 0x22, 0x8b, 0xe2, 0x98, 0x0b, - 0xba, 0x02, 0x83, 0xad, 0x80, 0x54, 0x23, 0x16, 0xa8, 0xa9, 0x77, 0x86, 0x7c, 0xd6, 0xf0, 0x82, - 0x58, 0x72, 0xb0, 0x7f, 0xb1, 0x00, 0x93, 0x49, 0x52, 0xf4, 0x1a, 0xf4, 0x91, 0xdb, 0xa4, 0x26, - 0xda, 0x9b, 0x79, 0x14, 0xc7, 0xaa, 0x09, 0xde, 0x01, 0xf4, 0x3f, 0x66, 0xa5, 0xd0, 0x65, 0x18, - 0xa4, 0xe7, 0xf0, 0x25, 0x15, 0x94, 0xf0, 0x91, 0xbc, 0xb3, 0x5c, 0x09, 0x34, 0xbc, 0x71, 0x02, - 0x84, 0x65, 0x71, 0xe6, 0x07, 0x56, 0x6b, 0x55, 0xe9, 0x15, 0x27, 0xea, 0x74, 0x13, 0xdf, 0x58, - 0xac, 0x70, 0x22, 0xc1, 0x8d, 0xfb, 0x81, 0x49, 0x20, 0x8e, 0x99, 0xa0, 0x8f, 0x42, 0x7f, 0xd8, - 0x20, 0xa4, 0x25, 0x0c, 0xfd, 0x99, 0xca, 0xc5, 0x2a, 0x25, 0x10, 0x9c, 0x98, 0x32, 0x82, 0x01, - 0x30, 0x2f, 0x68, 0xff, 0x92, 0x05, 0xc0, 0x1d, 0xe7, 0x1c, 0x6f, 0x8b, 0x1c, 0x81, 0x3e, 0x7e, - 0x09, 0xfa, 0xc2, 0x16, 0xa9, 0x75, 0xf2, 0x0f, 0x8f, 0xdb, 0x53, 0x6d, 0x91, 0x5a, 0x3c, 0x67, - 0xe9, 0x3f, 0xcc, 0x4a, 0xdb, 0x3f, 0x08, 0x30, 0x1e, 0x93, 0x95, 0x23, 0xd2, 0x44, 0xcf, 0x1a, - 0x71, 0x51, 0x4e, 0x26, 0xe2, 0xa2, 0x0c, 0x33, 0x6a, 0x4d, 0xf5, 0xfb, 0x19, 0x28, 0x36, 0x9d, - 0xdb, 0x42, 0xb7, 0xf7, 0x74, 0xe7, 0x66, 0x50, 0xfe, 0x73, 0x6b, 0xce, 0x6d, 0x7e, 0xfd, 0x7d, - 0x5a, 0xae, 0xb1, 0x35, 0xe7, 0x76, 0x57, 0x1f, 0x66, 0x5a, 0x09, 0xab, 0xcb, 
0xf5, 0x84, 0x4f, - 0x58, 0x4f, 0x75, 0xb9, 0x5e, 0xb2, 0x2e, 0xd7, 0xeb, 0xa1, 0x2e, 0xd7, 0x43, 0x77, 0x60, 0x50, - 0xb8, 0x6c, 0x8a, 0x10, 0x73, 0x17, 0x7a, 0xa8, 0x4f, 0x78, 0x7c, 0xf2, 0x3a, 0x2f, 0xc8, 0xeb, - 0xbd, 0x80, 0x76, 0xad, 0x57, 0x56, 0x88, 0xfe, 0x77, 0x0b, 0xc6, 0xc5, 0x6f, 0x4c, 0xde, 0x69, - 0x93, 0x30, 0x12, 0xe2, 0xef, 0x87, 0x7a, 0x6f, 0x83, 0x28, 0xc8, 0x9b, 0xf2, 0x21, 0x79, 0x52, - 0x99, 0xc8, 0xae, 0x2d, 0x4a, 0xb4, 0x02, 0xfd, 0xa2, 0x05, 0xc7, 0x9a, 0xce, 0x6d, 0x5e, 0x23, - 0x87, 0x61, 0x27, 0x72, 0x7d, 0xe1, 0xfa, 0xf0, 0x5a, 0x6f, 0xc3, 0x9f, 0x2a, 0xce, 0x1b, 0x29, - 0xed, 0x9c, 0xc7, 0xb2, 0x48, 0xba, 0x36, 0x35, 0xb3, 0x5d, 0xb3, 0x9b, 0x30, 0x24, 0xe7, 0xdb, - 0x83, 0xf4, 0x0f, 0x67, 0xf5, 0x88, 0xb9, 0xf6, 0x40, 0xeb, 0xf9, 0x0c, 0x8c, 0xea, 0x73, 0xec, - 0x81, 0xd6, 0xf5, 0x0e, 0x4c, 0x67, 0xcc, 0xa5, 0x07, 0x5a, 0xe5, 0x2d, 0x38, 0x99, 0x3b, 0x3f, - 0x1e, 0xa8, 0x7f, 0xff, 0xd7, 0x2c, 0x7d, 0x1f, 0x3c, 0x02, 0xa3, 0xc8, 0xa2, 0x69, 0x14, 0x39, - 0xd3, 0x79, 0xe5, 0xe4, 0x58, 0x46, 0xde, 0xd6, 0x1b, 0x4d, 0x77, 0x75, 0xf4, 0x06, 0x0c, 0x34, - 0x28, 0x44, 0x3a, 0xfe, 0xda, 0xdd, 0x57, 0x64, 0x2c, 0x8e, 0x32, 0x78, 0x88, 0x05, 0x07, 0xfb, - 0x57, 0x2d, 0xe8, 0x3b, 0x82, 0x9e, 0xc0, 0x66, 0x4f, 0x3c, 0x9b, 0xcb, 0x5a, 0x04, 0xff, 0x9f, - 0xc3, 0xce, 0xad, 0xe5, 0xdb, 0x11, 0xf1, 0x42, 0x76, 0xa6, 0x67, 0x76, 0xcc, 0xbe, 0x05, 0xd3, - 0xab, 0xbe, 0x53, 0x5f, 0x70, 0x1a, 0x8e, 0x57, 0x23, 0x41, 0xd9, 0xdb, 0x3a, 0x94, 0xd7, 0x7a, - 0xa1, 0xab, 0xd7, 0xfa, 0xcb, 0x30, 0xe0, 0xb6, 0xb4, 0xe8, 0xe1, 0x67, 0x69, 0x07, 0x96, 0x2b, - 0x22, 0x70, 0x38, 0x32, 0x2a, 0x67, 0x50, 0x2c, 0xe8, 0xe9, 0xc8, 0x73, 0x77, 0xb1, 0xbe, 0xfc, - 0x91, 0xa7, 0x52, 0x7c, 0x32, 0xc6, 0x94, 0xe1, 0xd8, 0xbc, 0x0d, 0x46, 0x15, 0xe2, 0x59, 0x19, - 0x86, 0x41, 0x97, 0x7f, 0xa9, 0x18, 0xfe, 0x27, 0xb2, 0xa5, 0xeb, 0x54, 0xc7, 0x68, 0x0f, 0xa6, - 0x38, 0x00, 0x4b, 0x46, 0xf6, 0xcb, 0x90, 0x19, 0x13, 0xa4, 0xbb, 0xe6, 0xc4, 0xfe, 0x38, 0x4c, - 0xb1, 0x92, 0x87, 0xd4, 0x4a, 0xd8, 0x09, 0x7d, 0x6f, 0x46, 0x20, 0x58, 0xfb, 0xdf, 0x5a, 0x80, - 0xd6, 0xfc, 0xba, 0xbb, 0xb9, 0x27, 0x98, 0xf3, 0xef, 0x7f, 0x07, 0x4a, 0xfc, 0xda, 0x97, 0x0c, - 0x96, 0xba, 0xd8, 0x70, 0xc2, 0x50, 0xd3, 0x35, 0x3f, 0x21, 0xea, 0x2d, 0x6d, 0x74, 0x26, 0xc7, - 0xdd, 0xf8, 0xa1, 0x37, 0x13, 0x91, 0xe0, 0x3e, 0x9c, 0x8a, 0x04, 0xf7, 0x44, 0xa6, 0xc7, 0x47, - 0xba, 0xf5, 0x32, 0x42, 0x9c, 0xfd, 0x05, 0x0b, 0x26, 0xd6, 0x13, 0xc1, 0x3f, 0xcf, 0x31, 0xf3, - 0x77, 0x86, 0x0d, 0xa5, 0xca, 0xa0, 0x58, 0x60, 0xef, 0xbb, 0x8e, 0xf1, 0x1f, 0x2c, 0x88, 0x63, - 0x10, 0x1d, 0x81, 0x54, 0xbb, 0x68, 0x48, 0xb5, 0x99, 0x37, 0x04, 0xd5, 0x9c, 0x3c, 0xa1, 0x16, - 0x5d, 0x51, 0x63, 0xd2, 0xe1, 0x72, 0x10, 0xb3, 0xe1, 0xeb, 0x6c, 0xdc, 0x1c, 0x38, 0x35, 0x1a, - 0x7f, 0x52, 0x00, 0xa4, 0x68, 0x7b, 0x8e, 0x1e, 0x98, 0x2e, 0x71, 0x7f, 0xa2, 0x07, 0xee, 0x02, - 0x62, 0x0e, 0x1c, 0x81, 0xe3, 0x85, 0x9c, 0xad, 0x2b, 0xb4, 0xaa, 0x87, 0xf3, 0x0e, 0x99, 0x95, - 0xcf, 0x09, 0x57, 0x53, 0xdc, 0x70, 0x46, 0x0d, 0x9a, 0x63, 0x4e, 0x7f, 0xaf, 0x8e, 0x39, 0x03, - 0x5d, 0xde, 0xc5, 0x7e, 0xd5, 0x82, 0x31, 0xd5, 0x4d, 0xef, 0x91, 0xc7, 0x0d, 0xaa, 0x3d, 0x39, - 0xe7, 0x4a, 0x45, 0x6b, 0x32, 0x3b, 0x6f, 0xbf, 0x8b, 0xbd, 0x6f, 0x76, 0x1a, 0xee, 0x1d, 0xa2, - 0xc2, 0xf2, 0x96, 0xc4, 0x7b, 0x65, 0x01, 0x3d, 0xd8, 0x2f, 0x8d, 0xa9, 0x7f, 0x3c, 0xac, 0x66, - 0x5c, 0xc4, 0xfe, 0x69, 0xba, 0xd8, 0xcd, 0xa9, 0x88, 0x5e, 0x84, 0xfe, 0xd6, 0xb6, 0x13, 0x92, - 0xc4, 0x23, 0xb0, 0xfe, 0x0a, 0x05, 0x1e, 0xec, 0x97, 0xc6, 0x55, 0x01, 0x06, 0xc1, 0x9c, 0xba, - 0xf7, 
0x98, 0x8c, 0xe9, 0xc9, 0xd9, 0x35, 0x26, 0xe3, 0xdf, 0x5a, 0xd0, 0xb7, 0x4e, 0x4f, 0xaf, - 0x07, 0xbf, 0x05, 0xbc, 0x6e, 0x6c, 0x01, 0xa7, 0xf2, 0x12, 0xd4, 0xe4, 0xae, 0xfe, 0x95, 0xc4, - 0xea, 0x3f, 0x93, 0xcb, 0xa1, 0xf3, 0xc2, 0x6f, 0xc2, 0x08, 0x4b, 0x7b, 0x23, 0x1e, 0xbc, 0x3d, - 0x6f, 0x2c, 0xf8, 0x52, 0x62, 0xc1, 0x4f, 0x68, 0xa4, 0xda, 0x4a, 0x7f, 0x12, 0x06, 0xc5, 0x0b, - 0xaa, 0xe4, 0x33, 0x71, 0x41, 0x8b, 0x25, 0xde, 0xfe, 0xc9, 0x22, 0x18, 0x69, 0x76, 0xd0, 0x6f, - 0x58, 0x30, 0x17, 0x70, 0xcf, 0xea, 0xfa, 0x52, 0x3b, 0x70, 0xbd, 0xad, 0x6a, 0x6d, 0x9b, 0xd4, - 0xdb, 0x0d, 0xd7, 0xdb, 0x2a, 0x6f, 0x79, 0xbe, 0x02, 0x2f, 0xdf, 0x26, 0xb5, 0x36, 0xb3, 0x7a, - 0x76, 0xc9, 0xe9, 0xa3, 0x5e, 0x28, 0x3c, 0x77, 0x77, 0xbf, 0x34, 0x87, 0x0f, 0xc5, 0x1b, 0x1f, - 0xb2, 0x2d, 0xe8, 0x0f, 0x2c, 0xb8, 0xc0, 0xd3, 0xbd, 0xf4, 0xde, 0xfe, 0x0e, 0x4a, 0x84, 0x8a, - 0x64, 0x15, 0x33, 0xd9, 0x20, 0x41, 0x73, 0xe1, 0x25, 0xd1, 0xa1, 0x17, 0x2a, 0x87, 0xab, 0x0b, - 0x1f, 0xb6, 0x71, 0xf6, 0x3f, 0x2d, 0xc2, 0x98, 0x88, 0xdd, 0x27, 0xce, 0x80, 0x17, 0x8d, 0x29, - 0xf1, 0x48, 0x62, 0x4a, 0x4c, 0x19, 0xc4, 0xf7, 0x67, 0xfb, 0x0f, 0x61, 0x8a, 0x6e, 0xce, 0x97, - 0x89, 0x13, 0x44, 0x37, 0x89, 0xc3, 0xfd, 0xed, 0x8a, 0x87, 0xde, 0xfd, 0x95, 0xe2, 0x77, 0x35, - 0xc9, 0x0c, 0xa7, 0xf9, 0x7f, 0x27, 0x9d, 0x39, 0x1e, 0x4c, 0xa6, 0xc2, 0x2f, 0xbe, 0x05, 0xc3, - 0xea, 0xf9, 0x8f, 0xd8, 0x74, 0x3a, 0x47, 0x31, 0x4d, 0x72, 0xe0, 0x7a, 0xc5, 0xf8, 0xe9, 0x59, - 0xcc, 0xce, 0xfe, 0x47, 0x05, 0xa3, 0x42, 0x3e, 0x88, 0xeb, 0x30, 0xe4, 0x84, 0x2c, 0xb2, 0x72, - 0xbd, 0x93, 0xea, 0x37, 0x55, 0x0d, 0x7b, 0x82, 0x35, 0x2f, 0x4a, 0x62, 0xc5, 0x03, 0x5d, 0xe6, - 0x5e, 0x8d, 0xbb, 0xa4, 0x93, 0xde, 0x37, 0xc5, 0x0d, 0xa4, 0xdf, 0xe3, 0x2e, 0xc1, 0xa2, 0x3c, - 0xfa, 0x24, 0x77, 0x3b, 0xbd, 0xe2, 0xf9, 0xb7, 0xbc, 0x4b, 0xbe, 0x2f, 0xe3, 0xb4, 0xf4, 0xc6, - 0x70, 0x4a, 0x3a, 0x9b, 0xaa, 0xe2, 0xd8, 0xe4, 0xd6, 0x5b, 0x3c, 0xe3, 0xcf, 0x02, 0x4b, 0x6f, - 0x61, 0xbe, 0xb6, 0x0f, 0x11, 0x81, 0x09, 0x11, 0x18, 0x52, 0xc2, 0x44, 0xdf, 0x65, 0xde, 0x70, - 0xcd, 0xd2, 0xb1, 0x85, 0xe2, 0x8a, 0xc9, 0x02, 0x27, 0x79, 0xda, 0x3f, 0x6b, 0x01, 0x7b, 0x79, - 0x7c, 0x04, 0xf2, 0xc8, 0x47, 0x4c, 0x79, 0x64, 0x26, 0xaf, 0x93, 0x73, 0x44, 0x91, 0x17, 0xf8, - 0xcc, 0xaa, 0x04, 0xfe, 0xed, 0x3d, 0xe1, 0x2b, 0xd4, 0xfd, 0x72, 0x65, 0x7f, 0xc9, 0x02, 0x96, - 0xa1, 0x05, 0xf3, 0xbb, 0xb4, 0xd4, 0xec, 0x77, 0x37, 0x83, 0x7f, 0x0c, 0x86, 0x36, 0x89, 0x13, - 0xb5, 0x03, 0x11, 0x67, 0xca, 0xec, 0x0b, 0xa3, 0xc1, 0x26, 0xef, 0x15, 0x51, 0x4a, 0xbc, 0x20, - 0x14, 0xff, 0xb0, 0xe2, 0x66, 0x87, 0x30, 0x9b, 0x5f, 0x0a, 0x5d, 0x83, 0x87, 0x02, 0x52, 0x6b, - 0x07, 0x21, 0x9d, 0xa7, 0xe2, 0x56, 0x22, 0xde, 0xe0, 0x58, 0xec, 0xf6, 0xf2, 0xf0, 0xdd, 0xfd, - 0xd2, 0x43, 0x38, 0x9b, 0x04, 0xe7, 0x95, 0xb5, 0xbf, 0x87, 0x1f, 0xb6, 0x2a, 0x34, 0x6e, 0x13, - 0xa6, 0x3c, 0xed, 0x3f, 0x3d, 0x5a, 0xe4, 0x1d, 0xfa, 0xb1, 0x6e, 0xc7, 0x29, 0x3b, 0x87, 0xb4, - 0xe7, 0xcd, 0x09, 0x36, 0x38, 0xcd, 0xd9, 0xfe, 0x29, 0x0b, 0x1e, 0xd2, 0x09, 0xb5, 0x17, 0x54, - 0xdd, 0xec, 0x50, 0x4b, 0x30, 0xe4, 0xb7, 0x48, 0xe0, 0x44, 0x7e, 0x20, 0xce, 0x8f, 0xf3, 0x72, - 0x92, 0x5d, 0x15, 0xf0, 0x03, 0x91, 0x5c, 0x44, 0x72, 0x97, 0x70, 0xac, 0x4a, 0xd2, 0x4b, 0x36, - 0x53, 0x7e, 0x85, 0xe2, 0xad, 0x1c, 0xdb, 0x0d, 0x98, 0x4b, 0x43, 0x88, 0x05, 0xc6, 0xfe, 0x2b, - 0x8b, 0x4f, 0x31, 0xbd, 0xe9, 0xe8, 0x1d, 0x98, 0x6c, 0x3a, 0x51, 0x6d, 0x7b, 0xf9, 0x76, 0x2b, - 0xe0, 0x56, 0x3d, 0xd9, 0x4f, 0x4f, 0x77, 0xeb, 0x27, 0xed, 0x23, 0x63, 0x9f, 0xda, 0xb5, 0x04, - 0x33, 0x9c, 0x62, 0x8f, 0x6e, 
0xc2, 0x08, 0x83, 0xb1, 0x67, 0xa0, 0x61, 0x27, 0x21, 0x21, 0xaf, - 0x36, 0xe5, 0x15, 0xb2, 0x16, 0xf3, 0xc1, 0x3a, 0x53, 0xfb, 0x2b, 0x45, 0xbe, 0xee, 0x99, 0x50, - 0xff, 0x24, 0x0c, 0xb6, 0xfc, 0xfa, 0x62, 0x79, 0x09, 0x8b, 0x51, 0x50, 0x07, 0x4a, 0x85, 0x83, - 0xb1, 0xc4, 0xa3, 0xf3, 0x30, 0x24, 0x7e, 0x4a, 0x2b, 0x2c, 0x9b, 0xe6, 0x82, 0x2e, 0xc4, 0x0a, - 0x8b, 0x9e, 0x03, 0x68, 0x05, 0xfe, 0xae, 0x5b, 0x67, 0x71, 0x67, 0x8a, 0xa6, 0x43, 0x57, 0x45, - 0x61, 0xb0, 0x46, 0x85, 0x5e, 0x85, 0xb1, 0xb6, 0x17, 0x72, 0xc1, 0x44, 0x8b, 0xee, 0xad, 0x5c, - 0x8d, 0xae, 0xe9, 0x48, 0x6c, 0xd2, 0xa2, 0x79, 0x18, 0x88, 0x1c, 0xe6, 0xa0, 0xd4, 0x9f, 0xef, - 0x77, 0xbd, 0x41, 0x29, 0xf4, 0xcc, 0x5f, 0xb4, 0x00, 0x16, 0x05, 0xd1, 0x5b, 0xf2, 0x45, 0x36, - 0xdf, 0xe2, 0xc5, 0x83, 0x87, 0xde, 0x8e, 0x03, 0xed, 0x3d, 0xb6, 0x78, 0x48, 0x61, 0xf0, 0x42, - 0xaf, 0x00, 0x90, 0xdb, 0x11, 0x09, 0x3c, 0xa7, 0xa1, 0xdc, 0x0a, 0x95, 0x84, 0xb0, 0xe4, 0xaf, - 0xfb, 0xd1, 0xb5, 0x90, 0x2c, 0x2b, 0x0a, 0xac, 0x51, 0xdb, 0xbf, 0x06, 0x00, 0xb1, 0x04, 0x8f, - 0xee, 0xc0, 0x50, 0xcd, 0x69, 0x39, 0x35, 0x9e, 0xd6, 0xb2, 0x98, 0xf7, 0x50, 0x36, 0x2e, 0x31, - 0xb7, 0x28, 0xc8, 0xb9, 0xe1, 0x41, 0x06, 0x48, 0x1e, 0x92, 0xe0, 0xae, 0xc6, 0x06, 0x55, 0x1f, - 0xfa, 0xbc, 0x05, 0x23, 0x22, 0xbc, 0x0e, 0x1b, 0xa1, 0x42, 0xbe, 0xad, 0x48, 0xab, 0x7f, 0x3e, - 0x2e, 0xc1, 0x9b, 0xf0, 0xbc, 0x9c, 0xa1, 0x1a, 0xa6, 0x6b, 0x2b, 0xf4, 0x8a, 0xd1, 0x07, 0xe5, - 0xa5, 0xb1, 0x68, 0x74, 0xa5, 0xba, 0x34, 0x0e, 0xb3, 0xd3, 0x42, 0xbf, 0x2f, 0x5e, 0x33, 0xee, - 0x8b, 0x7d, 0xf9, 0x4f, 0x4e, 0x0d, 0x41, 0xb6, 0xdb, 0x55, 0x11, 0x55, 0xf4, 0xf0, 0x13, 0xfd, - 0xf9, 0xef, 0x24, 0xb5, 0x1b, 0x53, 0x97, 0xd0, 0x13, 0x9f, 0x81, 0x89, 0xba, 0x29, 0x0e, 0x88, - 0x99, 0xf8, 0x44, 0x1e, 0xdf, 0x84, 0xf4, 0x10, 0x0b, 0x00, 0x09, 0x04, 0x4e, 0x32, 0x46, 0x15, - 0x1e, 0x8d, 0xa4, 0xec, 0x6d, 0xfa, 0xe2, 0xd1, 0x8d, 0x9d, 0x3b, 0x96, 0x7b, 0x61, 0x44, 0x9a, - 0x94, 0x32, 0x3e, 0xe7, 0xd7, 0x45, 0x59, 0xac, 0xb8, 0xa0, 0x37, 0x60, 0x80, 0x3d, 0x94, 0x0b, - 0x67, 0x86, 0xf2, 0x55, 0xf2, 0x66, 0xdc, 0xc7, 0x78, 0x41, 0xb2, 0xbf, 0x21, 0x16, 0x1c, 0xd0, - 0x65, 0xf9, 0x0c, 0x35, 0x2c, 0x7b, 0xd7, 0x42, 0xc2, 0x9e, 0xa1, 0x0e, 0x2f, 0x3c, 0x16, 0xbf, - 0x30, 0xe5, 0xf0, 0xcc, 0xfc, 0xa0, 0x46, 0x49, 0x2a, 0x4f, 0x89, 0xff, 0x32, 0xed, 0xa8, 0x88, - 0x52, 0x95, 0xd9, 0x3c, 0x33, 0x35, 0x69, 0xdc, 0x9d, 0xd7, 0x4d, 0x16, 0x38, 0xc9, 0x93, 0xca, - 0xa6, 0x7c, 0xd5, 0x8b, 0x67, 0x3b, 0xdd, 0xf6, 0x0e, 0x7e, 0x25, 0x67, 0xa7, 0x11, 0x87, 0x60, - 0x51, 0x1e, 0xb9, 0x30, 0x11, 0x18, 0x22, 0x82, 0x0c, 0x2e, 0x75, 0xae, 0x37, 0x39, 0x44, 0x0b, - 0x5b, 0x6e, 0xb2, 0xc1, 0x49, 0xbe, 0xb3, 0x3b, 0x30, 0x66, 0x6c, 0x10, 0x0f, 0xd4, 0xe4, 0xe5, - 0xc1, 0x64, 0x72, 0x37, 0x78, 0xa0, 0x96, 0xae, 0xbf, 0xe8, 0x83, 0x71, 0x73, 0xf6, 0xa2, 0x0b, - 0x30, 0x2c, 0x98, 0xa8, 0x2c, 0x41, 0x6a, 0x41, 0xae, 0x49, 0x04, 0x8e, 0x69, 0x58, 0x72, 0x28, - 0x56, 0x5c, 0x73, 0x09, 0x8f, 0x93, 0x43, 0x29, 0x0c, 0xd6, 0xa8, 0xe8, 0x6d, 0xee, 0xa6, 0xef, - 0x47, 0xea, 0xec, 0x53, 0x53, 0x7c, 0x81, 0x41, 0xb1, 0xc0, 0xd2, 0x33, 0x6f, 0x87, 0x04, 0x1e, - 0x69, 0x98, 0x41, 0xe7, 0xd5, 0x99, 0x77, 0x45, 0x47, 0x62, 0x93, 0x96, 0x9e, 0xdc, 0x7e, 0xc8, - 0xd6, 0x8c, 0xb8, 0x33, 0xc6, 0x2e, 0xf6, 0x55, 0x1e, 0x2c, 0x40, 0xe2, 0xd1, 0xc7, 0xe1, 0x21, - 0x15, 0xe0, 0x4d, 0xcc, 0x08, 0x59, 0xe3, 0x80, 0xa1, 0xe2, 0x79, 0x68, 0x31, 0x9b, 0x0c, 0xe7, - 0x95, 0x47, 0xaf, 0xc3, 0xb8, 0xb8, 0x57, 0x48, 0x8e, 0x83, 0xa6, 0xbf, 0xd8, 0x15, 0x03, 0x8b, - 0x13, 0xd4, 0x32, 0x6c, 0x3e, 0x13, 0xed, 0x25, 0x87, 
0xa1, 0x74, 0xd8, 0x7c, 0x1d, 0x8f, 0x53, - 0x25, 0xd0, 0x3c, 0x4c, 0x70, 0x71, 0xcf, 0xf5, 0xb6, 0xf8, 0x98, 0x88, 0x07, 0x7c, 0x6a, 0x21, - 0x5c, 0x35, 0xd1, 0x38, 0x49, 0x8f, 0x5e, 0x86, 0x51, 0x27, 0xa8, 0x6d, 0xbb, 0x11, 0xa9, 0x51, - 0x69, 0x9c, 0xb9, 0x6c, 0x69, 0x0e, 0x77, 0xf3, 0x1a, 0x0e, 0x1b, 0x94, 0xf6, 0x1d, 0x98, 0xce, - 0x88, 0x22, 0x42, 0x27, 0x8e, 0xd3, 0x72, 0xe5, 0x37, 0x25, 0xbc, 0xda, 0xe7, 0x2b, 0x65, 0xf9, - 0x35, 0x1a, 0x15, 0x9d, 0x9d, 0x2c, 0xda, 0x88, 0x96, 0xd0, 0x58, 0xcd, 0xce, 0x15, 0x89, 0xc0, - 0x31, 0x8d, 0xfd, 0x77, 0x05, 0x98, 0xc8, 0xb0, 0x56, 0xb1, 0xa4, 0xba, 0x89, 0x0b, 0x4e, 0x9c, - 0x43, 0xd7, 0xcc, 0xc2, 0x50, 0x38, 0x44, 0x16, 0x86, 0x62, 0xb7, 0x2c, 0x0c, 0x7d, 0xef, 0x26, - 0x0b, 0x83, 0xd9, 0x63, 0xfd, 0x3d, 0xf5, 0x58, 0x46, 0xe6, 0x86, 0x81, 0x43, 0x66, 0x6e, 0x30, - 0x3a, 0x7d, 0xb0, 0x87, 0x4e, 0xff, 0xb1, 0x02, 0x4c, 0x26, 0x0d, 0x5d, 0x47, 0xa0, 0x2c, 0x7e, - 0xc3, 0x50, 0x16, 0x9f, 0xef, 0xe5, 0xc1, 0x75, 0xae, 0xe2, 0x18, 0x27, 0x14, 0xc7, 0x4f, 0xf5, - 0xc4, 0xad, 0xb3, 0x12, 0xf9, 0xff, 0x2e, 0xc0, 0xf1, 0x4c, 0xfb, 0xdf, 0x11, 0xf4, 0xcd, 0x55, - 0xa3, 0x6f, 0x9e, 0xed, 0xf9, 0x31, 0x7a, 0x6e, 0x07, 0xdd, 0x48, 0x74, 0xd0, 0x85, 0xde, 0x59, - 0x76, 0xee, 0xa5, 0x6f, 0x14, 0xe1, 0x4c, 0x66, 0xb9, 0x58, 0xd7, 0xba, 0x62, 0xe8, 0x5a, 0x9f, - 0x4b, 0xe8, 0x5a, 0xed, 0xce, 0xa5, 0xef, 0x8f, 0xf2, 0x55, 0x3c, 0xca, 0x66, 0xa1, 0x25, 0xee, - 0x51, 0xf1, 0x6a, 0x3c, 0xca, 0x56, 0x8c, 0xb0, 0xc9, 0xf7, 0x3b, 0x49, 0xe1, 0xfa, 0xfb, 0x16, - 0x9c, 0xcc, 0x1c, 0x9b, 0x23, 0x50, 0xb0, 0xad, 0x9b, 0x0a, 0xb6, 0x27, 0x7b, 0x9e, 0xad, 0x39, - 0x1a, 0xb7, 0x2f, 0x0c, 0xe4, 0x7c, 0x0b, 0x53, 0x1a, 0x5c, 0x85, 0x11, 0xa7, 0x56, 0x23, 0x61, - 0xb8, 0xe6, 0xd7, 0x55, 0xc0, 0xf6, 0x67, 0xd9, 0x95, 0x2e, 0x06, 0x1f, 0xec, 0x97, 0x66, 0x93, - 0x2c, 0x62, 0x34, 0xd6, 0x39, 0xa0, 0x4f, 0xc2, 0x50, 0x28, 0x73, 0xed, 0xf5, 0xdd, 0x7b, 0xae, - 0x3d, 0xa6, 0x8f, 0x50, 0x4a, 0x11, 0xc5, 0x12, 0x7d, 0xb7, 0x1e, 0xe4, 0xa7, 0x83, 0x46, 0x8f, - 0x37, 0xf2, 0x1e, 0x42, 0xfd, 0x3c, 0x07, 0xb0, 0xab, 0x6e, 0x1f, 0x49, 0x85, 0x87, 0x76, 0x2f, - 0xd1, 0xa8, 0xd0, 0x47, 0x61, 0x32, 0xe4, 0xf1, 0x2d, 0x63, 0x8f, 0x0d, 0x3e, 0x17, 0x59, 0x88, - 0xb0, 0x6a, 0x02, 0x87, 0x53, 0xd4, 0x68, 0x45, 0xd6, 0xca, 0x7c, 0x73, 0xf8, 0xf4, 0x3c, 0x17, - 0xd7, 0x28, 0xfc, 0x73, 0x8e, 0x25, 0x07, 0x81, 0x75, 0xbf, 0x56, 0x12, 0x7d, 0x12, 0x80, 0x4e, - 0x22, 0xa1, 0xf8, 0x18, 0xcc, 0xdf, 0x42, 0xe9, 0xde, 0x52, 0xcf, 0x74, 0x58, 0x67, 0xaf, 0xa9, - 0x97, 0x14, 0x13, 0xac, 0x31, 0x44, 0x0e, 0x8c, 0xc5, 0xff, 0xe2, 0xbc, 0xd7, 0xe7, 0x73, 0x6b, - 0x48, 0x32, 0x67, 0xda, 0xf6, 0x25, 0x9d, 0x05, 0x36, 0x39, 0xa2, 0x4f, 0xc0, 0xc9, 0xdd, 0x5c, - 0x37, 0x98, 0xe1, 0x38, 0x95, 0x65, 0xbe, 0xf3, 0x4b, 0x7e, 0x79, 0xfb, 0x5f, 0x02, 0x3c, 0xdc, - 0x61, 0xa7, 0x47, 0xf3, 0xa6, 0x09, 0xfb, 0xe9, 0xa4, 0x36, 0x62, 0x36, 0xb3, 0xb0, 0xa1, 0x9e, - 0x48, 0x2c, 0xa8, 0xc2, 0xbb, 0x5e, 0x50, 0x3f, 0x62, 0x69, 0x7a, 0x22, 0xee, 0x43, 0xfc, 0x91, - 0x43, 0x9e, 0x60, 0xf7, 0x51, 0x71, 0xb4, 0x99, 0xa1, 0x7d, 0x79, 0xae, 0xe7, 0xe6, 0xf4, 0xae, - 0x8e, 0xf9, 0x5a, 0x76, 0x48, 0x6a, 0xae, 0x98, 0xb9, 0x74, 0xd8, 0xef, 0x3f, 0xaa, 0xf0, 0xd4, - 0x7f, 0x62, 0xc1, 0xc9, 0x14, 0x98, 0xb7, 0x81, 0x84, 0x22, 0xa8, 0xd9, 0xfa, 0xbb, 0x6e, 0xbc, - 0x64, 0xc8, 0xbf, 0xe1, 0xb2, 0xf8, 0x86, 0x93, 0xb9, 0x74, 0xc9, 0xa6, 0x7f, 0xf1, 0xcf, 0x4b, - 0xd3, 0xac, 0x02, 0x93, 0x10, 0xe7, 0x37, 0x1d, 0xb5, 0xe0, 0x6c, 0xad, 0x1d, 0x04, 0xf1, 0x64, - 0xcd, 0x58, 0x9c, 0xfc, 0xae, 0xf7, 0xd8, 0xdd, 0xfd, 0xd2, 0xd9, 0xc5, 0x2e, 
0xb4, 0xb8, 0x2b, - 0x37, 0xe4, 0x01, 0x6a, 0xa6, 0x9c, 0xcd, 0x44, 0xba, 0xfb, 0x4c, 0xdd, 0x49, 0xda, 0x35, 0x8d, - 0xbf, 0x9a, 0xcd, 0x70, 0x59, 0xcb, 0xe0, 0x7c, 0xb4, 0xda, 0x93, 0x6f, 0x4d, 0x38, 0xf0, 0xd9, - 0x55, 0x38, 0xd3, 0x79, 0x32, 0x1d, 0xea, 0xc5, 0xfe, 0x1f, 0x59, 0x70, 0xba, 0x63, 0x58, 0xa8, - 0x6f, 0xc3, 0xcb, 0x82, 0xfd, 0x39, 0x0b, 0x1e, 0xc9, 0x2c, 0x61, 0xf8, 0x35, 0x5e, 0x80, 0xe1, - 0x5a, 0x22, 0x59, 0x73, 0x1c, 0x20, 0x45, 0x25, 0x6a, 0x8e, 0x69, 0x0c, 0xf7, 0xc5, 0x42, 0x57, - 0xf7, 0xc5, 0xdf, 0xb6, 0x20, 0x75, 0xd4, 0x1f, 0x81, 0xe4, 0x59, 0x36, 0x25, 0xcf, 0xc7, 0x7a, - 0xe9, 0xcd, 0x1c, 0xa1, 0xf3, 0x6f, 0x26, 0xe0, 0x44, 0xce, 0x83, 0xdb, 0x5d, 0x98, 0xda, 0xaa, - 0x11, 0x33, 0xc2, 0x42, 0xa7, 0xc8, 0x63, 0x1d, 0xc3, 0x31, 0xf0, 0x1c, 0xd9, 0x29, 0x12, 0x9c, - 0xae, 0x02, 0x7d, 0xce, 0x82, 0x63, 0xce, 0xad, 0x70, 0x99, 0xde, 0x20, 0xdc, 0xda, 0x42, 0xc3, - 0xaf, 0xed, 0x50, 0xc1, 0x4c, 0x2e, 0xab, 0x17, 0x32, 0x15, 0xc8, 0x37, 0xaa, 0x29, 0x7a, 0xa3, - 0xfa, 0x99, 0xbb, 0xfb, 0xa5, 0x63, 0x59, 0x54, 0x38, 0xb3, 0x2e, 0x84, 0x45, 0x4e, 0x22, 0x27, - 0xda, 0xee, 0x14, 0x03, 0x24, 0xeb, 0x65, 0x34, 0x17, 0x89, 0x25, 0x06, 0x2b, 0x3e, 0xe8, 0xd3, - 0x30, 0xbc, 0x25, 0x9f, 0xfb, 0x67, 0x88, 0xdc, 0x71, 0x47, 0x76, 0x0e, 0x82, 0xc0, 0xfd, 0x41, - 0x14, 0x11, 0x8e, 0x99, 0xa2, 0xd7, 0xa1, 0xe8, 0x6d, 0x86, 0x22, 0x12, 0x59, 0xb6, 0x5b, 0xaa, - 0xe9, 0xf8, 0xcb, 0x23, 0xed, 0xac, 0xaf, 0x54, 0x31, 0x2d, 0x88, 0x2e, 0x43, 0x31, 0xb8, 0x59, - 0x17, 0xd6, 0x8f, 0xcc, 0x45, 0x8a, 0x17, 0x96, 0x72, 0x5a, 0xc5, 0x38, 0xe1, 0x85, 0x25, 0x4c, - 0x59, 0xa0, 0x0a, 0xf4, 0xb3, 0x57, 0xaa, 0x42, 0xb4, 0xcd, 0xbc, 0xca, 0x77, 0x78, 0xed, 0xcd, - 0x5f, 0xc0, 0x31, 0x02, 0xcc, 0x19, 0xa1, 0x0d, 0x18, 0xa8, 0xb1, 0x04, 0xf4, 0x42, 0x96, 0xfd, - 0x60, 0xa6, 0x9d, 0xa3, 0x43, 0x66, 0x7e, 0xa1, 0xf6, 0x67, 0x14, 0x58, 0xf0, 0x62, 0x5c, 0x49, - 0x6b, 0x7b, 0x53, 0x9e, 0x58, 0xd9, 0x5c, 0x49, 0x6b, 0x7b, 0xa5, 0xda, 0x91, 0x2b, 0xa3, 0xc0, - 0x82, 0x17, 0x7a, 0x05, 0x0a, 0x9b, 0x35, 0xf1, 0x02, 0x35, 0xd3, 0xe0, 0x61, 0x06, 0x4b, 0x5a, - 0x18, 0xb8, 0xbb, 0x5f, 0x2a, 0xac, 0x2c, 0xe2, 0xc2, 0x66, 0x0d, 0xad, 0xc3, 0xe0, 0x26, 0x0f, - 0xaf, 0x22, 0x6c, 0x1a, 0x4f, 0x64, 0x47, 0x7e, 0x49, 0x45, 0x60, 0xe1, 0xaf, 0x19, 0x05, 0x02, - 0x4b, 0x26, 0x2c, 0x45, 0x8e, 0x0a, 0x13, 0x23, 0xa2, 0x54, 0xce, 0x1d, 0x2e, 0xb4, 0x0f, 0xbf, - 0x6a, 0xc4, 0xc1, 0x66, 0xb0, 0xc6, 0x91, 0xce, 0x6a, 0xe7, 0x4e, 0x3b, 0x60, 0x29, 0x0c, 0x44, - 0x38, 0xb3, 0xcc, 0x59, 0x3d, 0x2f, 0x89, 0x3a, 0xcd, 0x6a, 0x45, 0x84, 0x63, 0xa6, 0x68, 0x07, - 0xc6, 0x76, 0xc3, 0xd6, 0x36, 0x91, 0x4b, 0x9a, 0x45, 0x37, 0xcb, 0x91, 0x66, 0xaf, 0x0b, 0x42, - 0x37, 0x88, 0xda, 0x4e, 0x23, 0xb5, 0x0b, 0xb1, 0x6b, 0xcd, 0x75, 0x9d, 0x19, 0x36, 0x79, 0xd3, - 0xee, 0x7f, 0xa7, 0xed, 0xdf, 0xdc, 0x8b, 0x88, 0x08, 0x2e, 0x99, 0xd9, 0xfd, 0x6f, 0x72, 0x92, - 0x74, 0xf7, 0x0b, 0x04, 0x96, 0x4c, 0xd0, 0x75, 0xd1, 0x3d, 0x6c, 0xf7, 0x9c, 0xcc, 0x8f, 0x5c, - 0x3d, 0x2f, 0x89, 0x72, 0x3a, 0x85, 0xed, 0x96, 0x31, 0x2b, 0xb6, 0x4b, 0xb6, 0xb6, 0xfd, 0xc8, - 0xf7, 0x12, 0x3b, 0xf4, 0x54, 0xfe, 0x2e, 0x59, 0xc9, 0xa0, 0x4f, 0xef, 0x92, 0x59, 0x54, 0x38, - 0xb3, 0x2e, 0x54, 0x87, 0xf1, 0x96, 0x1f, 0x44, 0xb7, 0xfc, 0x40, 0xce, 0x2f, 0xd4, 0x41, 0x51, - 0x6a, 0x50, 0x8a, 0x1a, 0x59, 0xdc, 0x56, 0x13, 0x83, 0x13, 0x3c, 0xd1, 0xc7, 0x60, 0x30, 0xac, - 0x39, 0x0d, 0x52, 0xbe, 0x3a, 0x33, 0x9d, 0x7f, 0xfc, 0x54, 0x39, 0x49, 0xce, 0xec, 0xe2, 0xd1, - 0x71, 0x38, 0x09, 0x96, 0xec, 0xd0, 0x0a, 0xf4, 0xb3, 0xd4, 0xb3, 0x2c, 0x12, 0x6a, 0x4e, 0x00, - 0xee, 
0xd4, 0x1b, 0x13, 0xbe, 0x37, 0x31, 0x30, 0xe6, 0xc5, 0xe9, 0x1a, 0x10, 0x9a, 0x02, 0x3f, - 0x9c, 0x39, 0x9e, 0xbf, 0x06, 0x84, 0x82, 0xe1, 0x6a, 0xb5, 0xd3, 0x1a, 0x50, 0x44, 0x38, 0x66, - 0x4a, 0x77, 0x66, 0xba, 0x9b, 0x9e, 0xe8, 0xe0, 0x3f, 0x98, 0xbb, 0x97, 0xb2, 0x9d, 0x99, 0xee, - 0xa4, 0x94, 0x85, 0xfd, 0x9b, 0x43, 0x69, 0x99, 0x85, 0x69, 0x98, 0xfe, 0x67, 0x2b, 0xe5, 0xe7, - 0xf0, 0xa1, 0x5e, 0x15, 0xde, 0xf7, 0xf1, 0xe2, 0xfa, 0x39, 0x0b, 0x4e, 0xb4, 0x32, 0x3f, 0x44, - 0x08, 0x00, 0xbd, 0xe9, 0xcd, 0xf9, 0xa7, 0xab, 0xa8, 0xb9, 0xd9, 0x78, 0x9c, 0x53, 0x53, 0x52, - 0x39, 0x50, 0x7c, 0xd7, 0xca, 0x81, 0x35, 0x18, 0xaa, 0xf1, 0x9b, 0x9c, 0x8c, 0xf6, 0xde, 0x53, - 0xcc, 0x47, 0x26, 0x4a, 0x88, 0x2b, 0xe0, 0x26, 0x56, 0x2c, 0xd0, 0x8f, 0x5a, 0x70, 0x3a, 0xd9, - 0x74, 0x4c, 0x18, 0x5a, 0x84, 0xda, 0xe5, 0x6a, 0xad, 0x15, 0xf1, 0xfd, 0x29, 0xf9, 0xdf, 0x20, - 0x3e, 0xe8, 0x46, 0x80, 0x3b, 0x57, 0x86, 0x96, 0x32, 0xf4, 0x6a, 0x03, 0xa6, 0x45, 0xb1, 0x07, - 0xdd, 0xda, 0x0b, 0x30, 0xda, 0xf4, 0xdb, 0x5e, 0x24, 0xdc, 0x0d, 0x85, 0xc3, 0x13, 0x73, 0xf4, - 0x59, 0xd3, 0xe0, 0xd8, 0xa0, 0x4a, 0x68, 0xe4, 0x86, 0xee, 0x59, 0x23, 0xf7, 0x36, 0x8c, 0x7a, - 0x9a, 0x7f, 0x7c, 0xa7, 0x1b, 0xac, 0xd0, 0x2e, 0x6a, 0xd4, 0xbc, 0x95, 0x3a, 0x04, 0x1b, 0xdc, - 0x3a, 0x6b, 0xcb, 0xe0, 0xdd, 0x69, 0xcb, 0x8e, 0xf4, 0x4a, 0x6c, 0xff, 0x42, 0x21, 0xe3, 0xc6, - 0xc0, 0xb5, 0x72, 0xaf, 0x99, 0x5a, 0xb9, 0x73, 0x49, 0xad, 0x5c, 0xca, 0x54, 0x65, 0x28, 0xe4, - 0x7a, 0xcf, 0x79, 0xd7, 0x73, 0x1c, 0xdf, 0xef, 0xb7, 0xe0, 0x21, 0x66, 0xfb, 0xa0, 0x15, 0xbc, - 0x6b, 0x7b, 0x07, 0x73, 0x05, 0x5d, 0xcd, 0x66, 0x87, 0xf3, 0xea, 0xb1, 0x1b, 0x70, 0xb6, 0xdb, - 0xb9, 0xcb, 0x1c, 0x6b, 0xeb, 0xca, 0x39, 0x22, 0x76, 0xac, 0xad, 0x97, 0x97, 0x30, 0xc3, 0xf4, - 0x1a, 0xa5, 0xce, 0xfe, 0x8f, 0x16, 0x14, 0x2b, 0x7e, 0xfd, 0x08, 0x6e, 0xf4, 0x1f, 0x31, 0x6e, - 0xf4, 0x0f, 0x67, 0x9f, 0xf8, 0xf5, 0x5c, 0x63, 0xdf, 0x72, 0xc2, 0xd8, 0x77, 0x3a, 0x8f, 0x41, - 0x67, 0xd3, 0xde, 0x4f, 0x17, 0x61, 0xa4, 0xe2, 0xd7, 0xd5, 0x3a, 0xfb, 0x67, 0xf7, 0xf2, 0xaa, - 0x25, 0x37, 0xc9, 0x90, 0xc6, 0x99, 0x79, 0xe1, 0xca, 0x38, 0x07, 0xdf, 0x66, 0x8f, 0x5b, 0x6e, - 0x10, 0x77, 0x6b, 0x3b, 0x22, 0xf5, 0xe4, 0xe7, 0x1c, 0xdd, 0xe3, 0x96, 0x6f, 0x16, 0x61, 0x22, - 0x51, 0x3b, 0x6a, 0xc0, 0x58, 0x43, 0x37, 0x25, 0x89, 0x79, 0x7a, 0x4f, 0x56, 0x28, 0xf1, 0x38, - 0x40, 0x03, 0x61, 0x93, 0x39, 0x9a, 0x03, 0x50, 0xbe, 0x15, 0x52, 0xdb, 0xcf, 0xae, 0x35, 0xca, - 0xf9, 0x22, 0xc4, 0x1a, 0x05, 0x7a, 0x11, 0x46, 0x22, 0xbf, 0xe5, 0x37, 0xfc, 0xad, 0xbd, 0x2b, - 0x44, 0x06, 0x30, 0x54, 0x8e, 0xbe, 0x1b, 0x31, 0x0a, 0xeb, 0x74, 0xe8, 0x36, 0x4c, 0x29, 0x26, - 0xd5, 0xfb, 0x60, 0x5e, 0x63, 0x6a, 0x93, 0xf5, 0x24, 0x47, 0x9c, 0xae, 0x04, 0xbd, 0x02, 0xe3, - 0xcc, 0xe3, 0x98, 0x95, 0xbf, 0x42, 0xf6, 0x64, 0x60, 0x5b, 0x26, 0x61, 0xaf, 0x19, 0x18, 0x9c, - 0xa0, 0x44, 0x8b, 0x30, 0xd5, 0x74, 0xc3, 0x44, 0xf1, 0x01, 0x56, 0x9c, 0x35, 0x60, 0x2d, 0x89, - 0xc4, 0x69, 0x7a, 0xfb, 0xe7, 0xc4, 0x18, 0x7b, 0x91, 0xfb, 0xfe, 0x72, 0x7c, 0x6f, 0x2f, 0xc7, - 0x6f, 0x58, 0x30, 0x49, 0x6b, 0x67, 0x6e, 0x94, 0x52, 0x90, 0x52, 0xa9, 0x0f, 0xac, 0x0e, 0xa9, - 0x0f, 0xce, 0xd1, 0x6d, 0xbb, 0xee, 0xb7, 0x23, 0xa1, 0x1d, 0xd5, 0xf6, 0x65, 0x0a, 0xc5, 0x02, - 0x2b, 0xe8, 0x48, 0x10, 0x88, 0x47, 0xe0, 0x3a, 0x1d, 0x09, 0x02, 0x2c, 0xb0, 0x32, 0x33, 0x42, - 0x5f, 0x76, 0x66, 0x04, 0x1e, 0xe0, 0x5a, 0x78, 0xc1, 0x09, 0x91, 0x56, 0x0b, 0x70, 0x2d, 0xdd, - 0xe3, 0x62, 0x1a, 0xfb, 0x6b, 0x45, 0x18, 0xad, 0xf8, 0xf5, 0xd8, 0xb1, 0xe3, 0x05, 0xc3, 0xb1, - 0xe3, 0x6c, 0xc2, 0xb1, 0x63, 
0x52, 0xa7, 0x7d, 0xdf, 0x8d, 0xe3, 0x5b, 0xe5, 0xc6, 0xf1, 0x5b, - 0x16, 0x1b, 0xb5, 0xa5, 0xf5, 0x2a, 0xf7, 0xca, 0x45, 0x17, 0x61, 0x84, 0xed, 0x70, 0x2c, 0xea, - 0x80, 0xf4, 0x76, 0x60, 0x99, 0x0a, 0xd7, 0x63, 0x30, 0xd6, 0x69, 0xd0, 0x79, 0x18, 0x0a, 0x89, - 0x13, 0xd4, 0xb6, 0xd5, 0xf6, 0x2e, 0x5c, 0x13, 0x38, 0x0c, 0x2b, 0x2c, 0x7a, 0x33, 0x8e, 0xad, - 0x5c, 0xcc, 0x77, 0xf1, 0xd5, 0xdb, 0xc3, 0x97, 0x48, 0x7e, 0x40, 0x65, 0xfb, 0x06, 0xa0, 0x34, - 0x7d, 0x0f, 0xcf, 0x9e, 0x4a, 0x66, 0xf4, 0xcf, 0xe1, 0x54, 0xe4, 0xcf, 0xbf, 0xb7, 0x60, 0xbc, - 0xe2, 0xd7, 0xe9, 0xd2, 0xfd, 0x4e, 0x5a, 0xa7, 0x7a, 0x60, 0xf9, 0x81, 0x0e, 0x81, 0xe5, 0x1f, - 0x85, 0xfe, 0x8a, 0x5f, 0xef, 0x12, 0xa1, 0xf4, 0xff, 0xb1, 0x60, 0xb0, 0xe2, 0xd7, 0x8f, 0xc0, - 0xf0, 0xf2, 0x9a, 0x69, 0x78, 0x79, 0x28, 0x67, 0xde, 0xe4, 0xd8, 0x5a, 0xfe, 0xaf, 0x3e, 0x18, - 0xa3, 0xed, 0xf4, 0xb7, 0xe4, 0x50, 0x1a, 0xdd, 0x66, 0xf5, 0xd0, 0x6d, 0xf4, 0x1a, 0xe0, 0x37, - 0x1a, 0xfe, 0xad, 0xe4, 0xb0, 0xae, 0x30, 0x28, 0x16, 0x58, 0xf4, 0x0c, 0x0c, 0xb5, 0x02, 0xb2, - 0xeb, 0xfa, 0x42, 0xbe, 0xd6, 0xcc, 0x58, 0x15, 0x01, 0xc7, 0x8a, 0x82, 0x5e, 0xbc, 0x43, 0xd7, - 0xa3, 0xb2, 0x44, 0xcd, 0xf7, 0xea, 0xdc, 0x36, 0x51, 0x14, 0xd9, 0x8f, 0x34, 0x38, 0x36, 0xa8, - 0xd0, 0x0d, 0x18, 0x66, 0xff, 0xd9, 0xb6, 0x73, 0xf8, 0xc4, 0xee, 0x22, 0x1f, 0xac, 0x60, 0x80, - 0x63, 0x5e, 0xe8, 0x39, 0x80, 0x48, 0x66, 0x10, 0x09, 0x45, 0xa4, 0x4a, 0x75, 0x17, 0x51, 0xb9, - 0x45, 0x42, 0xac, 0x51, 0xa1, 0xa7, 0x61, 0x38, 0x72, 0xdc, 0xc6, 0xaa, 0xeb, 0x31, 0xfb, 0x3d, - 0x6d, 0xbf, 0x48, 0xcb, 0x2a, 0x80, 0x38, 0xc6, 0x53, 0x59, 0x90, 0xc5, 0x20, 0x5a, 0xd8, 0x8b, - 0x44, 0x06, 0xb2, 0x22, 0x97, 0x05, 0x57, 0x15, 0x14, 0x6b, 0x14, 0x68, 0x1b, 0x4e, 0xb9, 0x1e, - 0xcb, 0x14, 0x44, 0xaa, 0x3b, 0x6e, 0x6b, 0x63, 0xb5, 0x7a, 0x9d, 0x04, 0xee, 0xe6, 0xde, 0x82, - 0x53, 0xdb, 0x21, 0x9e, 0x4c, 0xd9, 0xfd, 0x98, 0x68, 0xe2, 0xa9, 0x72, 0x07, 0x5a, 0xdc, 0x91, - 0x93, 0xfd, 0x3c, 0x9b, 0xef, 0x57, 0xab, 0xe8, 0x29, 0x63, 0xeb, 0x38, 0xa1, 0x6f, 0x1d, 0x07, - 0xfb, 0xa5, 0x81, 0xab, 0x55, 0x2d, 0x10, 0xce, 0xcb, 0x70, 0xbc, 0xe2, 0xd7, 0x2b, 0x7e, 0x10, - 0xad, 0xf8, 0xc1, 0x2d, 0x27, 0xa8, 0xcb, 0xe9, 0x55, 0x92, 0xa1, 0x80, 0xe8, 0xfe, 0xd9, 0xcf, - 0x77, 0x17, 0x23, 0xcc, 0xcf, 0xf3, 0x4c, 0x62, 0x3b, 0xe4, 0x1b, 0xcf, 0x1a, 0x93, 0x1d, 0x54, - 0xae, 0xad, 0x4b, 0x4e, 0x44, 0xd0, 0x55, 0x18, 0xab, 0xe9, 0xc7, 0xa8, 0x28, 0xfe, 0xa4, 0x3c, - 0xc8, 0x8c, 0x33, 0x36, 0xf3, 0xdc, 0x35, 0xcb, 0xdb, 0xdf, 0x23, 0x2a, 0xe1, 0x8a, 0x08, 0xee, - 0xd2, 0xda, 0x4b, 0x56, 0x7b, 0x99, 0x8c, 0xa7, 0x90, 0x1f, 0x68, 0x91, 0xdb, 0x95, 0x3b, 0x26, - 0xe3, 0xb1, 0xbf, 0x17, 0x4e, 0x24, 0xab, 0xef, 0x39, 0xb5, 0xfe, 0x22, 0x4c, 0x05, 0x7a, 0x41, - 0x2d, 0xb3, 0xe1, 0x71, 0x9e, 0x40, 0x25, 0x81, 0xc4, 0x69, 0x7a, 0xfb, 0x45, 0x98, 0xa2, 0x97, - 0x5f, 0x25, 0xc8, 0xb1, 0x5e, 0xee, 0x1e, 0x13, 0xe9, 0x8f, 0x07, 0xd8, 0x41, 0x94, 0x48, 0x73, - 0x85, 0x3e, 0x05, 0xe3, 0x21, 0x59, 0x75, 0xbd, 0xf6, 0x6d, 0xa9, 0x5b, 0xeb, 0xf0, 0xb8, 0xb9, - 0xba, 0xac, 0x53, 0xf2, 0xfb, 0x83, 0x09, 0xc3, 0x09, 0x6e, 0xa8, 0x09, 0xe3, 0xb7, 0x5c, 0xaf, - 0xee, 0xdf, 0x0a, 0x25, 0xff, 0xa1, 0x7c, 0x45, 0xfd, 0x0d, 0x4e, 0x99, 0x68, 0xa3, 0x51, 0xdd, - 0x0d, 0x83, 0x19, 0x4e, 0x30, 0xa7, 0x8b, 0x3d, 0x68, 0x7b, 0xf3, 0xe1, 0xb5, 0x90, 0xf0, 0x47, - 0xaa, 0x62, 0xb1, 0x63, 0x09, 0xc4, 0x31, 0x9e, 0x2e, 0x76, 0xf6, 0xe7, 0x52, 0xe0, 0xb7, 0x79, - 0x4e, 0x25, 0xb1, 0xd8, 0xb1, 0x82, 0x62, 0x8d, 0x82, 0x6e, 0x86, 0xec, 0xdf, 0xba, 0xef, 0x61, - 0xdf, 0x8f, 0xe4, 0xf6, 0xc9, 0x72, 0x02, 0x6a, 0x70, 
0x6c, 0x50, 0xa1, 0x15, 0x40, 0x61, 0xbb, - 0xd5, 0x6a, 0x30, 0xd7, 0x45, 0xa7, 0xc1, 0x58, 0x71, 0xb7, 0xab, 0x22, 0xf7, 0x6e, 0xa9, 0xa6, - 0xb0, 0x38, 0xa3, 0x04, 0x3d, 0x17, 0x37, 0x45, 0x53, 0xfb, 0x59, 0x53, 0xb9, 0x51, 0xaf, 0xca, - 0xdb, 0x29, 0x71, 0x68, 0x19, 0x06, 0xc3, 0xbd, 0xb0, 0x16, 0x35, 0xc2, 0x4e, 0x19, 0x18, 0xab, - 0x8c, 0x44, 0x4b, 0x00, 0xcc, 0x8b, 0x60, 0x59, 0x16, 0xd5, 0x60, 0x5a, 0x70, 0x5c, 0xdc, 0x76, - 0x3c, 0x95, 0x17, 0x8e, 0x7b, 0xef, 0x5d, 0xbc, 0xbb, 0x5f, 0x9a, 0x16, 0x35, 0xeb, 0xe8, 0x83, - 0xfd, 0x12, 0x5d, 0x1c, 0x19, 0x18, 0x9c, 0xc5, 0x8d, 0x4f, 0xbe, 0x5a, 0xcd, 0x6f, 0xb6, 0x2a, - 0x81, 0xbf, 0xe9, 0x36, 0x48, 0x27, 0xc3, 0x68, 0xd5, 0xa0, 0x14, 0x93, 0xcf, 0x80, 0xe1, 0x04, - 0x37, 0x74, 0x13, 0x26, 0x9c, 0x56, 0x6b, 0x3e, 0x68, 0xfa, 0x81, 0xac, 0x60, 0x24, 0x5f, 0xc3, - 0x3e, 0x6f, 0x92, 0xf2, 0xb4, 0x70, 0x09, 0x20, 0x4e, 0x32, 0xb4, 0xbf, 0x87, 0xc9, 0xa7, 0x55, - 0x77, 0xcb, 0x63, 0xef, 0xc6, 0x51, 0x13, 0xc6, 0x5a, 0x6c, 0x07, 0x13, 0xd9, 0x94, 0xc4, 0x7a, - 0x7a, 0xa1, 0x47, 0x1d, 0xdb, 0x2d, 0x96, 0x0f, 0xd2, 0xf0, 0xb5, 0xac, 0xe8, 0xec, 0xb0, 0xc9, - 0xdd, 0xfe, 0x57, 0x27, 0x99, 0x84, 0x53, 0xe5, 0x8a, 0xb3, 0x41, 0xf1, 0x0a, 0x4e, 0x5c, 0x95, - 0x67, 0xf3, 0x55, 0xd4, 0xf1, 0xd0, 0x8b, 0x97, 0x74, 0x58, 0x96, 0x45, 0x9f, 0x84, 0x71, 0x7a, - 0xf3, 0x54, 0x52, 0x46, 0x38, 0x73, 0x2c, 0x3f, 0x6e, 0x91, 0xa2, 0xd2, 0x33, 0xad, 0xe9, 0x85, - 0x71, 0x82, 0x19, 0x7a, 0x93, 0xb9, 0x1f, 0x4a, 0xd6, 0x85, 0x5e, 0x58, 0xeb, 0x9e, 0x86, 0x92, - 0xad, 0xc6, 0x04, 0xb5, 0x61, 0x3a, 0x9d, 0x4f, 0x36, 0x9c, 0xb1, 0xf3, 0x45, 0xf8, 0x74, 0x4a, - 0xd8, 0x38, 0x25, 0x56, 0x1a, 0x17, 0xe2, 0x2c, 0xfe, 0x68, 0x35, 0x99, 0xed, 0xb3, 0x68, 0x28, - 0xb7, 0x53, 0x19, 0x3f, 0xc7, 0x3a, 0x26, 0xfa, 0xdc, 0x82, 0xd3, 0x5a, 0xc2, 0xc4, 0x4b, 0x81, - 0xc3, 0xdc, 0x5f, 0x5c, 0xb6, 0x65, 0x6b, 0xb2, 0xd7, 0x23, 0x77, 0xf7, 0x4b, 0xa7, 0x37, 0x3a, - 0x11, 0xe2, 0xce, 0x7c, 0xd0, 0x55, 0x38, 0xce, 0xa3, 0x6e, 0x2c, 0x11, 0xa7, 0xde, 0x70, 0x3d, - 0x25, 0xdc, 0xf1, 0x6d, 0xe5, 0xe4, 0xdd, 0xfd, 0xd2, 0xf1, 0xf9, 0x2c, 0x02, 0x9c, 0x5d, 0x0e, - 0xbd, 0x06, 0xc3, 0x75, 0x2f, 0x14, 0x7d, 0x30, 0x60, 0xe4, 0xa4, 0x1c, 0x5e, 0x5a, 0xaf, 0xaa, - 0xef, 0x8f, 0xff, 0xe0, 0xb8, 0x00, 0xda, 0xe2, 0xd6, 0x15, 0xa5, 0x12, 0x1b, 0x4c, 0x05, 0x63, - 0x4c, 0x6a, 0x8d, 0x8d, 0xd7, 0xf6, 0xdc, 0xac, 0xa8, 0x5e, 0x86, 0x19, 0x0f, 0xf1, 0x0d, 0xc6, - 0xe8, 0x0d, 0x40, 0x22, 0xf7, 0xc9, 0x7c, 0x8d, 0xa5, 0xea, 0xd2, 0x5c, 0x1e, 0xd5, 0x4d, 0xb7, - 0x9a, 0xa2, 0xc0, 0x19, 0xa5, 0xd0, 0x65, 0xba, 0x73, 0xe9, 0x50, 0xb1, 0x33, 0xaa, 0xcc, 0xc7, - 0x4b, 0xa4, 0x15, 0x10, 0xe6, 0xa5, 0x67, 0x72, 0xc4, 0x89, 0x72, 0xa8, 0x0e, 0xa7, 0x9c, 0x76, - 0xe4, 0x33, 0xc3, 0x95, 0x49, 0xba, 0xe1, 0xef, 0x10, 0x8f, 0xd9, 0x8c, 0x87, 0x58, 0x90, 0xc7, - 0x53, 0xf3, 0x1d, 0xe8, 0x70, 0x47, 0x2e, 0x54, 0xea, 0xa7, 0x7d, 0xa1, 0xd9, 0x94, 0x8c, 0x87, - 0xc3, 0xdc, 0xd0, 0x2a, 0x29, 0xd0, 0x8b, 0x30, 0xb2, 0xed, 0x87, 0xd1, 0x3a, 0x89, 0x6e, 0xf9, - 0xc1, 0x8e, 0x08, 0xb6, 0x1e, 0x27, 0xb8, 0x88, 0x51, 0x58, 0xa7, 0xa3, 0xd7, 0x7a, 0xe6, 0xd1, - 0x54, 0x5e, 0x62, 0xce, 0x24, 0x43, 0xf1, 0x1e, 0x73, 0x99, 0x83, 0xb1, 0xc4, 0x4b, 0xd2, 0x72, - 0x65, 0x91, 0x39, 0x86, 0x24, 0x48, 0xcb, 0x95, 0x45, 0x2c, 0xf1, 0x74, 0xba, 0x86, 0xdb, 0x4e, - 0x40, 0x2a, 0x81, 0x5f, 0x23, 0xa1, 0x96, 0x56, 0xe5, 0x61, 0x1e, 0x4a, 0x9e, 0x4e, 0xd7, 0x6a, - 0x16, 0x01, 0xce, 0x2e, 0x87, 0x48, 0x3a, 0x59, 0xe8, 0x78, 0xbe, 0x45, 0x2f, 0x2d, 0x33, 0xf5, - 0x98, 0x2f, 0xd4, 0x83, 0x49, 0x95, 0xa6, 0x94, 0x07, 0x8f, 0x0f, 0x67, 0x26, 
0xd8, 0xdc, 0xee, - 0x3d, 0xf2, 0xbc, 0xb2, 0x91, 0x96, 0x13, 0x9c, 0x70, 0x8a, 0xb7, 0x11, 0x45, 0x74, 0xb2, 0x6b, - 0x14, 0xd1, 0x0b, 0x30, 0x1c, 0xb6, 0x6f, 0xd6, 0xfd, 0xa6, 0xe3, 0x7a, 0xcc, 0x31, 0x44, 0xbb, - 0x5f, 0x56, 0x25, 0x02, 0xc7, 0x34, 0x68, 0x05, 0x86, 0x1c, 0x69, 0x00, 0x45, 0xf9, 0x01, 0xd2, - 0x94, 0xd9, 0x93, 0xc7, 0x0c, 0x92, 0x26, 0x4f, 0x55, 0x16, 0xbd, 0x0a, 0x63, 0x22, 0x56, 0x84, - 0xc8, 0xec, 0x3d, 0x6d, 0xbe, 0xb2, 0xad, 0xea, 0x48, 0x6c, 0xd2, 0xa2, 0x6b, 0x30, 0x12, 0xf9, - 0x0d, 0xf6, 0x54, 0x94, 0x8a, 0x92, 0x27, 0xf2, 0xe3, 0x98, 0x6e, 0x28, 0x32, 0x5d, 0x35, 0xaf, - 0x8a, 0x62, 0x9d, 0x0f, 0xda, 0xe0, 0xf3, 0x9d, 0x25, 0x51, 0x21, 0xa1, 0x48, 0x0d, 0x7d, 0x3a, - 0xcf, 0xab, 0x8f, 0x91, 0x99, 0xcb, 0x41, 0x94, 0xc4, 0x3a, 0x1b, 0x74, 0x09, 0xa6, 0x5a, 0x81, - 0xeb, 0xb3, 0x39, 0xa1, 0x0c, 0xba, 0x33, 0x66, 0xca, 0xc4, 0x4a, 0x92, 0x00, 0xa7, 0xcb, 0xb0, - 0x50, 0x1f, 0x02, 0x38, 0x73, 0x92, 0xa7, 0x7d, 0xe2, 0xd7, 0x75, 0x0e, 0xc3, 0x0a, 0x8b, 0xd6, - 0xd8, 0x4e, 0xcc, 0x35, 0x4d, 0x33, 0xb3, 0xf9, 0x31, 0xd9, 0x74, 0x8d, 0x14, 0x17, 0x90, 0xd5, - 0x5f, 0x1c, 0x73, 0x40, 0x75, 0x2d, 0xdb, 0x32, 0xbd, 0x66, 0x84, 0x33, 0xa7, 0x3a, 0xb8, 0x95, - 0x26, 0x6e, 0x7e, 0xb1, 0x40, 0x60, 0x80, 0x43, 0x9c, 0xe0, 0x89, 0x3e, 0x0a, 0x93, 0xe2, 0x1d, - 0x7c, 0xdc, 0x4d, 0xa7, 0xe3, 0xa7, 0x37, 0x38, 0x81, 0xc3, 0x29, 0x6a, 0x9e, 0x76, 0xc9, 0xb9, - 0xd9, 0x20, 0x62, 0xeb, 0x5b, 0x75, 0xbd, 0x9d, 0x70, 0xe6, 0x0c, 0xdb, 0x1f, 0x44, 0xda, 0xa5, - 0x24, 0x16, 0x67, 0x94, 0x40, 0x1b, 0x30, 0xd9, 0x0a, 0x08, 0x69, 0xb2, 0xcb, 0x84, 0x38, 0xcf, - 0x4a, 0x3c, 0xd2, 0x0d, 0x6d, 0x49, 0x25, 0x81, 0x3b, 0xc8, 0x80, 0xe1, 0x14, 0x07, 0x74, 0x0b, - 0x86, 0xfc, 0x5d, 0x12, 0x6c, 0x13, 0xa7, 0x3e, 0x73, 0xb6, 0xc3, 0x83, 0x30, 0x71, 0xb8, 0x5d, - 0x15, 0xb4, 0x09, 0x7f, 0x19, 0x09, 0xee, 0xee, 0x2f, 0x23, 0x2b, 0x43, 0xff, 0x8b, 0x05, 0x27, - 0xa5, 0x05, 0xaa, 0xda, 0xa2, 0xbd, 0xbe, 0xe8, 0x7b, 0x61, 0x14, 0xf0, 0xd8, 0x2c, 0x8f, 0xe4, - 0xc7, 0x2b, 0xd9, 0xc8, 0x29, 0xa4, 0x94, 0xdd, 0x27, 0xf3, 0x28, 0x42, 0x9c, 0x5f, 0x23, 0xbd, - 0xfe, 0x86, 0x24, 0x92, 0x9b, 0xd1, 0x7c, 0xb8, 0xf2, 0xe6, 0xd2, 0xfa, 0xcc, 0xa3, 0x3c, 0xb0, - 0x0c, 0x5d, 0x0c, 0xd5, 0x24, 0x12, 0xa7, 0xe9, 0xd1, 0x45, 0x28, 0xf8, 0xe1, 0xcc, 0x63, 0x1d, - 0x12, 0x74, 0xfb, 0xf5, 0xab, 0x55, 0xee, 0x37, 0x79, 0xb5, 0x8a, 0x0b, 0x7e, 0x28, 0x53, 0x1f, - 0xd1, 0x3b, 0x5f, 0x38, 0xf3, 0x38, 0x57, 0x8d, 0xca, 0xd4, 0x47, 0x0c, 0x88, 0x63, 0x3c, 0xda, - 0x86, 0x89, 0xd0, 0xb8, 0x5b, 0x87, 0x33, 0xe7, 0x58, 0x4f, 0x3d, 0x9e, 0x37, 0x68, 0x06, 0xb5, - 0x96, 0x93, 0xc4, 0xe4, 0x82, 0x93, 0x6c, 0xf9, 0xea, 0xd2, 0x6e, 0xf7, 0xe1, 0xcc, 0x13, 0x5d, - 0x56, 0x97, 0x46, 0xac, 0xaf, 0x2e, 0x9d, 0x07, 0x4e, 0xf0, 0x9c, 0xfd, 0x2e, 0x98, 0x4a, 0x89, - 0x4b, 0x87, 0x79, 0x23, 0x30, 0xbb, 0x03, 0x63, 0xc6, 0x94, 0x7c, 0xa0, 0x2e, 0x24, 0xbf, 0x3f, - 0x0c, 0xc3, 0xca, 0xb4, 0x8f, 0x2e, 0x98, 0x5e, 0x23, 0x27, 0x93, 0x5e, 0x23, 0x43, 0x15, 0xbf, - 0x6e, 0x38, 0x8a, 0x6c, 0x64, 0x04, 0x22, 0xcd, 0xdb, 0x00, 0x7b, 0x7f, 0xc8, 0xa4, 0x99, 0x2b, - 0x8a, 0x3d, 0xbb, 0x9f, 0xf4, 0x75, 0xb4, 0x80, 0x5c, 0x82, 0x29, 0xcf, 0x67, 0x32, 0x3a, 0xa9, - 0x4b, 0x01, 0x8c, 0xc9, 0x59, 0xc3, 0x7a, 0x3c, 0xaf, 0x04, 0x01, 0x4e, 0x97, 0xa1, 0x15, 0x72, - 0x41, 0x29, 0x69, 0x72, 0xe1, 0x72, 0x14, 0x16, 0x58, 0x7a, 0x37, 0xe4, 0xbf, 0xc2, 0x99, 0xc9, - 0xfc, 0xbb, 0x21, 0x2f, 0x94, 0x14, 0xc6, 0x42, 0x29, 0x8c, 0x31, 0x0b, 0x43, 0xcb, 0xaf, 0x97, - 0x2b, 0x42, 0xcc, 0xd7, 0x42, 0x84, 0xd7, 0xcb, 0x15, 0xcc, 0x71, 0x68, 0x1e, 0x06, 0xd8, 0x0f, - 0x19, 
0x27, 0x25, 0x6f, 0x99, 0x96, 0x2b, 0x5a, 0xea, 0x45, 0x56, 0x00, 0x8b, 0x82, 0x4c, 0x83, - 0x4c, 0xef, 0x46, 0x4c, 0x83, 0x3c, 0x78, 0x8f, 0x1a, 0x64, 0xc9, 0x00, 0xc7, 0xbc, 0xd0, 0x6d, - 0x38, 0x6e, 0xdc, 0x47, 0xd5, 0xcb, 0x2e, 0xc8, 0x37, 0x2e, 0x27, 0x88, 0x17, 0x4e, 0x8b, 0x46, - 0x1f, 0x2f, 0x67, 0x71, 0xc2, 0xd9, 0x15, 0xa0, 0x06, 0x4c, 0xd5, 0x52, 0xb5, 0x0e, 0xf5, 0x5e, - 0xab, 0x9a, 0x17, 0xe9, 0x1a, 0xd3, 0x8c, 0xd1, 0xab, 0x30, 0xf4, 0x8e, 0xcf, 0x1d, 0xc1, 0xc4, - 0xd5, 0x44, 0x46, 0x15, 0x19, 0x7a, 0xf3, 0x6a, 0x95, 0xc1, 0x0f, 0xf6, 0x4b, 0x23, 0x15, 0xbf, - 0x2e, 0xff, 0x62, 0x55, 0x00, 0xfd, 0x90, 0x05, 0xb3, 0xe9, 0x0b, 0xaf, 0x6a, 0xf4, 0x58, 0xef, - 0x8d, 0xb6, 0x45, 0xa5, 0xb3, 0xcb, 0xb9, 0xec, 0x70, 0x87, 0xaa, 0xd0, 0x87, 0xe9, 0x7a, 0x0a, - 0xdd, 0x3b, 0x44, 0xe4, 0xad, 0x7e, 0x24, 0x5e, 0x4f, 0x14, 0x7a, 0xb0, 0x5f, 0x9a, 0xe0, 0x3b, - 0xa3, 0x7b, 0x47, 0x05, 0x33, 0xe7, 0x05, 0xd0, 0xf7, 0xc2, 0xf1, 0x20, 0xad, 0xa5, 0x25, 0x52, - 0x08, 0x7f, 0xaa, 0x97, 0x5d, 0x36, 0x39, 0xe0, 0x38, 0x8b, 0x21, 0xce, 0xae, 0xc7, 0xfe, 0x75, - 0x8b, 0xe9, 0xd0, 0x45, 0xb3, 0x48, 0xd8, 0x6e, 0x1c, 0x45, 0xb6, 0xfc, 0x65, 0xc3, 0x3e, 0x7d, - 0xcf, 0xde, 0x53, 0xbf, 0x6b, 0x31, 0xef, 0xa9, 0x23, 0x7c, 0x07, 0xf6, 0x26, 0x0c, 0x45, 0xa2, - 0xb6, 0x4e, 0x09, 0xfe, 0xb5, 0x46, 0x31, 0x0f, 0x32, 0x75, 0xc9, 0x91, 0x50, 0xac, 0xd8, 0xd8, - 0xff, 0x84, 0x8f, 0x80, 0xc4, 0x1c, 0x81, 0x19, 0x70, 0xc9, 0x34, 0x03, 0x96, 0xba, 0x7c, 0x41, - 0x8e, 0x39, 0xf0, 0x1f, 0x9b, 0xed, 0x66, 0xca, 0xbd, 0xf7, 0xba, 0xdb, 0x9e, 0xfd, 0x05, 0x0b, - 0x20, 0xce, 0x1e, 0xd1, 0x43, 0x9e, 0xda, 0x97, 0xe9, 0xb5, 0xc6, 0x8f, 0xfc, 0x9a, 0xdf, 0x10, - 0x46, 0x90, 0x53, 0xb1, 0x25, 0x92, 0xc3, 0x0f, 0xb4, 0xdf, 0x58, 0x51, 0xa3, 0x92, 0x0c, 0xe7, - 0x5a, 0x8c, 0x6d, 0xe3, 0x46, 0x28, 0xd7, 0x2f, 0x5b, 0x70, 0x2c, 0xeb, 0x51, 0x01, 0xbd, 0x24, - 0x73, 0x35, 0xa7, 0x72, 0xa9, 0x54, 0xa3, 0x79, 0x5d, 0xc0, 0xb1, 0xa2, 0xe8, 0x39, 0x01, 0xf0, - 0xe1, 0x32, 0x1b, 0x5c, 0x85, 0xb1, 0x4a, 0x40, 0x34, 0xf9, 0xe2, 0x75, 0x1e, 0xad, 0x87, 0xb7, - 0xe7, 0x99, 0x43, 0x47, 0xea, 0xb1, 0xbf, 0x52, 0x80, 0x63, 0xdc, 0x31, 0x68, 0x7e, 0xd7, 0x77, - 0xeb, 0x15, 0xbf, 0x2e, 0x9e, 0x82, 0xbe, 0x05, 0xa3, 0x2d, 0x4d, 0x37, 0xdd, 0x29, 0x4a, 0xb7, - 0xae, 0xc3, 0x8e, 0xb5, 0x69, 0x3a, 0x14, 0x1b, 0xbc, 0x50, 0x1d, 0x46, 0xc9, 0xae, 0x5b, 0x53, - 0xde, 0x25, 0x85, 0x43, 0x1f, 0xd2, 0xaa, 0x96, 0x65, 0x8d, 0x0f, 0x36, 0xb8, 0xf6, 0xec, 0xce, - 0xab, 0x89, 0x68, 0x7d, 0x5d, 0x3c, 0x4a, 0x7e, 0xdc, 0x82, 0x87, 0x72, 0x62, 0x7a, 0xd3, 0xea, - 0x6e, 0x31, 0x17, 0x2c, 0x31, 0x6d, 0x55, 0x75, 0xdc, 0x31, 0x0b, 0x0b, 0x2c, 0xfa, 0x18, 0x00, - 0x77, 0xac, 0x22, 0x5e, 0xad, 0x6b, 0xf0, 0x63, 0x23, 0x5a, 0xab, 0x16, 0x78, 0x53, 0x96, 0xc7, - 0x1a, 0x2f, 0xfb, 0xcb, 0x7d, 0xd0, 0xcf, 0x1c, 0x79, 0x50, 0x05, 0x06, 0xb7, 0x79, 0x5c, 0xb8, - 0x8e, 0xe3, 0x46, 0x69, 0x65, 0xa0, 0xb9, 0x78, 0xdc, 0x34, 0x28, 0x96, 0x6c, 0xd0, 0x1a, 0x4c, - 0xf3, 0x2c, 0x84, 0x8d, 0x25, 0xd2, 0x70, 0xf6, 0xa4, 0xda, 0x97, 0x27, 0xd6, 0x57, 0xea, 0xef, - 0x72, 0x9a, 0x04, 0x67, 0x95, 0x43, 0xaf, 0xc3, 0x38, 0xbd, 0x86, 0xfb, 0xed, 0x48, 0x72, 0xe2, - 0xf9, 0x07, 0xd5, 0xcd, 0x64, 0xc3, 0xc0, 0xe2, 0x04, 0x35, 0x7a, 0x15, 0xc6, 0x5a, 0x29, 0x05, - 0x77, 0x7f, 0xac, 0x09, 0x32, 0x95, 0xda, 0x26, 0x2d, 0x7b, 0x57, 0xd0, 0x66, 0xaf, 0x28, 0x36, - 0xb6, 0x03, 0x12, 0x6e, 0xfb, 0x8d, 0x3a, 0x93, 0x80, 0xfb, 0xb5, 0x77, 0x05, 0x09, 0x3c, 0x4e, - 0x95, 0xa0, 0x5c, 0x36, 0x1d, 0xb7, 0xd1, 0x0e, 0x48, 0xcc, 0x65, 0xc0, 0xe4, 0xb2, 0x92, 0xc0, - 0xe3, 0x54, 0x89, 0xee, 0x9a, 
0xfb, 0xc1, 0xfb, 0xa3, 0xb9, 0xb7, 0x7f, 0xa6, 0x00, 0xc6, 0xd0, - 0x7e, 0x07, 0xe7, 0x45, 0x7c, 0x0d, 0xfa, 0xb6, 0x82, 0x56, 0x4d, 0x38, 0xad, 0x65, 0x7e, 0x59, - 0x9c, 0x14, 0x9d, 0x7f, 0x19, 0xfd, 0x8f, 0x59, 0x29, 0xba, 0xc6, 0x8f, 0x57, 0x02, 0x9f, 0x1e, - 0x72, 0x32, 0x74, 0xa4, 0x7a, 0xbe, 0x33, 0x28, 0x03, 0x51, 0x74, 0x08, 0xb2, 0x2c, 0xde, 0x20, - 0x70, 0x0e, 0x86, 0x7f, 0x57, 0x55, 0x84, 0x9b, 0x91, 0x5c, 0xd0, 0x45, 0x18, 0x11, 0xa9, 0xea, - 0xd8, 0x2b, 0x13, 0xbe, 0x98, 0x98, 0x3f, 0xda, 0x52, 0x0c, 0xc6, 0x3a, 0x8d, 0xfd, 0xc3, 0x05, - 0x98, 0xce, 0x78, 0x26, 0xc8, 0x8f, 0x91, 0x2d, 0x37, 0x8c, 0x54, 0xde, 0x75, 0xed, 0x18, 0xe1, - 0x70, 0xac, 0x28, 0xe8, 0x5e, 0xc5, 0x0f, 0xaa, 0xe4, 0xe1, 0x24, 0x9e, 0xe1, 0x08, 0xec, 0x21, - 0x33, 0x98, 0x9f, 0x85, 0xbe, 0x76, 0x48, 0x64, 0xa0, 0x74, 0x75, 0x6c, 0x33, 0xd3, 0x39, 0xc3, - 0xd0, 0x2b, 0xe0, 0x96, 0xb2, 0x42, 0x6b, 0x57, 0x40, 0x6e, 0x87, 0xe6, 0x38, 0xda, 0xb8, 0x88, - 0x78, 0x8e, 0x17, 0x89, 0x8b, 0x62, 0x1c, 0xe7, 0x97, 0x41, 0xb1, 0xc0, 0xda, 0x5f, 0x2a, 0xc2, - 0xc9, 0xdc, 0x87, 0xc3, 0xb4, 0xe9, 0x4d, 0xdf, 0x73, 0x23, 0x5f, 0x39, 0xfa, 0xf1, 0xd8, 0xbe, - 0xa4, 0xb5, 0xbd, 0x26, 0xe0, 0x58, 0x51, 0xa0, 0x73, 0xd0, 0xcf, 0x94, 0xe2, 0xa9, 0x0c, 0xf4, - 0x0b, 0x4b, 0x3c, 0x02, 0x23, 0x47, 0x6b, 0xa7, 0x7a, 0xb1, 0xe3, 0xa9, 0xfe, 0x28, 0x95, 0x60, - 0xfc, 0x46, 0xf2, 0x40, 0xa1, 0xcd, 0xf5, 0xfd, 0x06, 0x66, 0x48, 0xf4, 0xb8, 0xe8, 0xaf, 0x84, - 0x67, 0x1b, 0x76, 0xea, 0x7e, 0xa8, 0x75, 0xda, 0x93, 0x30, 0xb8, 0x43, 0xf6, 0x02, 0xd7, 0xdb, - 0x4a, 0x7a, 0x3c, 0x5e, 0xe1, 0x60, 0x2c, 0xf1, 0x66, 0x32, 0xe4, 0xc1, 0xfb, 0x91, 0x0c, 0x59, - 0x9f, 0x01, 0x43, 0x5d, 0xc5, 0x93, 0x1f, 0x29, 0xc2, 0x04, 0x5e, 0x58, 0x7a, 0x7f, 0x20, 0xae, - 0xa5, 0x07, 0xe2, 0x7e, 0xe4, 0x0c, 0x3e, 0xdc, 0x68, 0xfc, 0xb2, 0x05, 0x13, 0x2c, 0x61, 0x9e, - 0x88, 0xfa, 0xe1, 0xfa, 0xde, 0x11, 0x5c, 0x05, 0x1e, 0x85, 0xfe, 0x80, 0x56, 0x9a, 0x4c, 0x3d, - 0xcf, 0x5a, 0x82, 0x39, 0x0e, 0x9d, 0x82, 0x3e, 0xd6, 0x04, 0x3a, 0x78, 0xa3, 0x7c, 0x0b, 0x5e, - 0x72, 0x22, 0x07, 0x33, 0x28, 0x8b, 0x3f, 0x88, 0x49, 0xab, 0xe1, 0xf2, 0x46, 0xc7, 0x2e, 0x0b, - 0xef, 0x8d, 0x90, 0x22, 0x99, 0x4d, 0x7b, 0x77, 0xf1, 0x07, 0xb3, 0x59, 0x76, 0xbe, 0x66, 0xff, - 0x75, 0x01, 0xce, 0x64, 0x96, 0xeb, 0x39, 0xfe, 0x60, 0xe7, 0xd2, 0x0f, 0x32, 0xf7, 0x57, 0xf1, - 0x08, 0xfd, 0xc9, 0xfb, 0x7a, 0x95, 0xfe, 0xfb, 0x7b, 0x08, 0x0b, 0x98, 0xd9, 0x65, 0xef, 0x91, - 0xb0, 0x80, 0x99, 0x6d, 0xcb, 0x51, 0x13, 0xfc, 0x43, 0x21, 0xe7, 0x5b, 0x98, 0xc2, 0xe0, 0x3c, - 0xdd, 0x67, 0x18, 0x32, 0x94, 0x97, 0x70, 0xbe, 0xc7, 0x70, 0x18, 0x56, 0x58, 0x34, 0x0f, 0x13, - 0x4d, 0xd7, 0xa3, 0x9b, 0xcf, 0x9e, 0x29, 0x8a, 0x2b, 0x5b, 0xc6, 0x9a, 0x89, 0xc6, 0x49, 0x7a, - 0xe4, 0x6a, 0x21, 0x03, 0xf9, 0xd7, 0xbd, 0x7a, 0xa8, 0x55, 0x37, 0x67, 0xba, 0x73, 0xa8, 0x5e, - 0xcc, 0x08, 0x1f, 0xb8, 0xa6, 0xe9, 0x89, 0x8a, 0xbd, 0xeb, 0x89, 0x46, 0xb3, 0x75, 0x44, 0xb3, - 0xaf, 0xc2, 0xd8, 0x3d, 0xdb, 0x46, 0xec, 0x6f, 0x14, 0xe1, 0xe1, 0x0e, 0xcb, 0x9e, 0xef, 0xf5, - 0xc6, 0x18, 0x68, 0x7b, 0x7d, 0x6a, 0x1c, 0x2a, 0x70, 0x6c, 0xb3, 0xdd, 0x68, 0xec, 0xb1, 0x87, - 0x53, 0xa4, 0x2e, 0x29, 0x84, 0x4c, 0x29, 0x95, 0x23, 0xc7, 0x56, 0x32, 0x68, 0x70, 0x66, 0x49, - 0x7a, 0xc5, 0xa2, 0x27, 0xc9, 0x9e, 0x62, 0x95, 0xb8, 0x62, 0x61, 0x1d, 0x89, 0x4d, 0x5a, 0x74, - 0x09, 0xa6, 0x9c, 0x5d, 0xc7, 0xe5, 0x29, 0x1e, 0x24, 0x03, 0x7e, 0xc7, 0x52, 0xba, 0xe8, 0xf9, - 0x24, 0x01, 0x4e, 0x97, 0x41, 0x6f, 0x00, 0xf2, 0x6f, 0xb2, 0xc7, 0x18, 0xf5, 0x4b, 0xc4, 0x13, - 0x56, 0x77, 0x36, 0x76, 0xc5, 0x78, 0x4b, 0xb8, 0x9a, 
0xa2, 0xc0, 0x19, 0xa5, 0x12, 0xc1, 0xeb, - 0x06, 0xf2, 0x83, 0xd7, 0x75, 0xde, 0x17, 0xbb, 0xa6, 0x9d, 0xbb, 0x08, 0x63, 0x87, 0x74, 0x31, - 0xb6, 0xff, 0x9d, 0x05, 0x4a, 0x41, 0x6c, 0x06, 0x9f, 0x7e, 0x95, 0xf9, 0x40, 0x73, 0xd5, 0xb6, - 0x16, 0x6f, 0xea, 0xb8, 0xe6, 0x03, 0x1d, 0x23, 0xb1, 0x49, 0xcb, 0xe7, 0x90, 0xe6, 0xbb, 0x6c, - 0xdc, 0x0a, 0x44, 0x6c, 0x4c, 0x45, 0x81, 0x3e, 0x0e, 0x83, 0x75, 0x77, 0xd7, 0x0d, 0x85, 0x72, - 0xec, 0xd0, 0xc6, 0xb8, 0x78, 0xeb, 0x5c, 0xe2, 0x6c, 0xb0, 0xe4, 0x67, 0xff, 0x48, 0x21, 0xee, - 0x93, 0x37, 0xdb, 0x7e, 0xe4, 0x1c, 0xc1, 0x49, 0x7e, 0xc9, 0x38, 0xc9, 0x1f, 0xcf, 0x1e, 0x68, - 0xad, 0x49, 0xb9, 0x27, 0xf8, 0xd5, 0xc4, 0x09, 0xfe, 0x44, 0x77, 0x56, 0x9d, 0x4f, 0xee, 0x5f, - 0xb3, 0x60, 0xca, 0xa0, 0x3f, 0x82, 0x03, 0x64, 0xc5, 0x3c, 0x40, 0x1e, 0xe9, 0xfa, 0x0d, 0x39, - 0x07, 0xc7, 0x0f, 0x16, 0x13, 0x6d, 0x67, 0x07, 0xc6, 0x3b, 0xd0, 0xb7, 0xed, 0x04, 0x75, 0x71, - 0x2f, 0xbe, 0xd0, 0x53, 0x5f, 0xcf, 0x5d, 0x76, 0x02, 0xe1, 0xa9, 0xf0, 0x8c, 0xec, 0x75, 0x0a, - 0xea, 0xea, 0xa5, 0xc0, 0xaa, 0x42, 0x2f, 0xc3, 0x40, 0x58, 0xf3, 0x5b, 0xea, 0x5d, 0x16, 0xcb, - 0x65, 0x5c, 0x65, 0x90, 0x83, 0xfd, 0x12, 0x32, 0xab, 0xa3, 0x60, 0x2c, 0xe8, 0xd1, 0x5b, 0x30, - 0xc6, 0x7e, 0x29, 0xb7, 0xc1, 0x62, 0xbe, 0x06, 0xa3, 0xaa, 0x13, 0x72, 0x9f, 0x5a, 0x03, 0x84, - 0x4d, 0x56, 0xb3, 0x5b, 0x30, 0xac, 0x3e, 0xeb, 0x81, 0x5a, 0xbb, 0xff, 0x4d, 0x11, 0xa6, 0x33, - 0xe6, 0x1c, 0x0a, 0x8d, 0x91, 0xb8, 0xd8, 0xe3, 0x54, 0x7d, 0x97, 0x63, 0x11, 0xb2, 0x0b, 0x54, - 0x5d, 0xcc, 0xad, 0x9e, 0x2b, 0xbd, 0x16, 0x92, 0x64, 0xa5, 0x14, 0xd4, 0xbd, 0x52, 0x5a, 0xd9, - 0x91, 0x75, 0x35, 0xad, 0x48, 0xb5, 0xf4, 0x81, 0x8e, 0xe9, 0x6f, 0xf5, 0xc1, 0xb1, 0xac, 0x98, - 0xc5, 0xe8, 0xb3, 0x89, 0x04, 0xe9, 0x2f, 0x74, 0xea, 0x61, 0xbd, 0x24, 0xcf, 0x9a, 0x2e, 0x42, - 0x85, 0xce, 0x99, 0x29, 0xd3, 0xbb, 0x76, 0xb3, 0xa8, 0x93, 0x85, 0xf0, 0x09, 0x78, 0x62, 0x7b, - 0xb9, 0x7d, 0x7c, 0xa8, 0xe7, 0x06, 0x88, 0x8c, 0xf8, 0x61, 0xc2, 0x25, 0x49, 0x82, 0xbb, 0xbb, - 0x24, 0xc9, 0x9a, 0x51, 0x19, 0x06, 0x6a, 0xdc, 0xd7, 0xa5, 0xd8, 0x7d, 0x0b, 0xe3, 0x8e, 0x2e, - 0x6a, 0x03, 0x16, 0x0e, 0x2e, 0x82, 0xc1, 0xac, 0x0b, 0x23, 0x5a, 0xc7, 0x3c, 0xd0, 0xc9, 0xb3, - 0x43, 0x0f, 0x3e, 0xad, 0x0b, 0x1e, 0xe8, 0x04, 0xfa, 0x71, 0x0b, 0x12, 0x8f, 0x6a, 0x94, 0x52, - 0xce, 0xca, 0x55, 0xca, 0x9d, 0x85, 0xbe, 0xc0, 0x6f, 0x90, 0x64, 0xf6, 0x6d, 0xec, 0x37, 0x08, - 0x66, 0x18, 0x4a, 0x11, 0xc5, 0xaa, 0x96, 0x51, 0xfd, 0x1a, 0x29, 0x2e, 0x88, 0x8f, 0x42, 0x7f, - 0x83, 0xec, 0x92, 0x46, 0x32, 0x49, 0xe2, 0x2a, 0x05, 0x62, 0x8e, 0xb3, 0x7f, 0xb9, 0x0f, 0x4e, - 0x77, 0x8c, 0xa7, 0x45, 0x2f, 0x63, 0x5b, 0x4e, 0x44, 0x6e, 0x39, 0x7b, 0xc9, 0x1c, 0x66, 0x97, - 0x38, 0x18, 0x4b, 0x3c, 0x7b, 0x62, 0xca, 0xf3, 0x83, 0x24, 0x54, 0x98, 0x22, 0x2d, 0x88, 0xc0, - 0x9a, 0x2a, 0xb1, 0xe2, 0xfd, 0x50, 0x89, 0x3d, 0x07, 0x10, 0x86, 0x0d, 0xee, 0x16, 0x58, 0x17, - 0x6f, 0x57, 0xe3, 0x3c, 0x32, 0xd5, 0x55, 0x81, 0xc1, 0x1a, 0x15, 0x5a, 0x82, 0xc9, 0x56, 0xe0, - 0x47, 0x5c, 0x23, 0xbc, 0xc4, 0x3d, 0x67, 0xfb, 0xcd, 0x50, 0x46, 0x95, 0x04, 0x1e, 0xa7, 0x4a, - 0xa0, 0x17, 0x61, 0x44, 0x84, 0x37, 0xaa, 0xf8, 0x7e, 0x43, 0x28, 0xa1, 0x94, 0x33, 0x69, 0x35, - 0x46, 0x61, 0x9d, 0x4e, 0x2b, 0xc6, 0xd4, 0xcc, 0x83, 0x99, 0xc5, 0xb8, 0xaa, 0x59, 0xa3, 0x4b, - 0x84, 0x42, 0x1f, 0xea, 0x29, 0x14, 0x7a, 0xac, 0x96, 0x1b, 0xee, 0xd9, 0xea, 0x09, 0x5d, 0x15, - 0x59, 0x5f, 0xed, 0x83, 0x69, 0x31, 0x71, 0x1e, 0xf4, 0x74, 0xb9, 0x96, 0x9e, 0x2e, 0xf7, 0x43, - 0x71, 0xf7, 0xfe, 0x9c, 0x39, 0xea, 0x39, 0xf3, 0xa3, 0x16, 0x98, 0x92, 0x1a, 
[generated-code hunk collapsed: the remaining hex bytes of the previous gzipped FileDescriptorProto array are deleted and replaced by the regenerated blob, introduced by the comment "// 16056 bytes of a gzipped FileDescriptorProto"; the raw descriptor bytes are omitted here]
0x2a, 0x6d, 0xa9, 0x32, 0x0c, 0x2c, 0x68, + 0xa1, 0x97, 0xa0, 0xb0, 0x5e, 0x15, 0xae, 0xc9, 0x99, 0xca, 0x13, 0x33, 0x8a, 0xd6, 0x5c, 0xdf, + 0xde, 0x6e, 0xa9, 0xb0, 0x34, 0x8f, 0x0b, 0xeb, 0x55, 0xb4, 0x0a, 0xfd, 0xeb, 0x3c, 0xee, 0x8e, + 0xd0, 0x8f, 0x3c, 0x9e, 0x1d, 0x12, 0x28, 0x15, 0x9a, 0x87, 0x7b, 0x97, 0x0a, 0x00, 0x96, 0x44, + 0x58, 0x02, 0x2a, 0x15, 0x3f, 0x48, 0x84, 0x2f, 0x9d, 0x39, 0x58, 0xcc, 0x27, 0xfe, 0xd4, 0x88, + 0xa3, 0x10, 0x61, 0x8d, 0x22, 0x5d, 0xd5, 0xce, 0xdd, 0x56, 0xc0, 0x72, 0x5b, 0x08, 0xd5, 0x48, + 0xe6, 0xaa, 0x9e, 0x95, 0x48, 0xed, 0x56, 0xb5, 0x42, 0xc2, 0x31, 0x51, 0xb4, 0x05, 0x23, 0xdb, + 0x61, 0x73, 0x93, 0xc8, 0x2d, 0xcd, 0xc2, 0xde, 0xe5, 0x70, 0xb3, 0x37, 0x04, 0xa2, 0x1b, 0x44, + 0x2d, 0xa7, 0x9e, 0x3a, 0x85, 0xd8, 0xb3, 0xe6, 0x86, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0xf0, 0xbf, + 0xdd, 0xf2, 0x6f, 0xed, 0x44, 0x44, 0x44, 0x1d, 0xcd, 0x1c, 0xfe, 0x37, 0x38, 0x4a, 0x7a, 0xf8, + 0x05, 0x00, 0x4b, 0x22, 0xe8, 0x86, 0x18, 0x1e, 0x76, 0x7a, 0x8e, 0xe7, 0x87, 0x34, 0x9f, 0x95, + 0x48, 0x39, 0x83, 0xc2, 0x4e, 0xcb, 0x98, 0x14, 0x3b, 0x25, 0x9b, 0x9b, 0x7e, 0xe4, 0x7b, 0x89, + 0x13, 0x7a, 0x22, 0xff, 0x94, 0x2c, 0x67, 0xe0, 0xa7, 0x4f, 0xc9, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, + 0xaa, 0xc1, 0x68, 0xd3, 0x0f, 0xa2, 0xdb, 0x7e, 0x20, 0xd7, 0x17, 0x6a, 0x23, 0x28, 0x35, 0x30, + 0x45, 0x8b, 0xcc, 0x30, 0xc7, 0x84, 0xe0, 0x04, 0x4d, 0xf4, 0x51, 0xe8, 0x0f, 0xab, 0x4e, 0x9d, + 0x2c, 0x5f, 0x9b, 0x9a, 0xcc, 0xbf, 0x7e, 0x2a, 0x1c, 0x25, 0x67, 0x75, 0xf1, 0xb0, 0x49, 0x1c, + 0x05, 0x4b, 0x72, 0x68, 0x09, 0x7a, 0x59, 0x62, 0x67, 0x16, 0x22, 0x37, 0x27, 0x32, 0x7b, 0xca, + 0xad, 0x86, 0x9f, 0x4d, 0xac, 0x18, 0xf3, 0xea, 0x74, 0x0f, 0x08, 0x49, 0x81, 0x1f, 0x4e, 0x1d, + 0xcd, 0xdf, 0x03, 0x42, 0xc0, 0x70, 0xad, 0xd2, 0x6e, 0x0f, 0x28, 0x24, 0x1c, 0x13, 0xa5, 0x27, + 0x33, 0x3d, 0x4d, 0x8f, 0xb5, 0x31, 0x99, 0xcc, 0x3d, 0x4b, 0xd9, 0xc9, 0x4c, 0x4f, 0x52, 0x4a, + 0xc2, 0xfe, 0x8d, 0x81, 0x34, 0xcf, 0xc2, 0x24, 0x4c, 0xff, 0xbf, 0x95, 0xb2, 0x99, 0xf8, 0x50, + 0xb7, 0x02, 0xef, 0xfb, 0xf8, 0x70, 0xfd, 0xac, 0x05, 0xc7, 0x9a, 0x99, 0x1f, 0x22, 0x18, 0x80, + 0xee, 0xe4, 0xe6, 0xfc, 0xd3, 0x55, 0x38, 0xe5, 0x6c, 0x38, 0xce, 0x69, 0x29, 0x29, 0x1c, 0x28, + 0xbe, 0x63, 0xe1, 0xc0, 0x0a, 0x0c, 0x54, 0xf9, 0x4b, 0x4e, 0xa6, 0x01, 0xe8, 0x2a, 0x18, 0x28, + 0x63, 0x25, 0xc4, 0x13, 0x70, 0x1d, 0x2b, 0x12, 0xe8, 0x87, 0x2d, 0x38, 0x95, 0xec, 0x3a, 0x26, + 0x0c, 0x2c, 0x0c, 0x26, 0xb9, 0x58, 0x6b, 0x49, 0x7c, 0x7f, 0x8a, 0xff, 0x37, 0x90, 0xf7, 0x3b, + 0x21, 0xe0, 0xf6, 0x8d, 0xa1, 0x85, 0x0c, 0xb9, 0x5a, 0x9f, 0xa9, 0x51, 0xec, 0x42, 0xb6, 0xf6, + 0x3c, 0x0c, 0x37, 0xfc, 0x96, 0x17, 0x09, 0xbb, 0x47, 0x61, 0x3c, 0xc5, 0x8c, 0x86, 0x56, 0xb4, + 0x72, 0x6c, 0x60, 0x25, 0x24, 0x72, 0x03, 0xf7, 0x2c, 0x91, 0x7b, 0x0b, 0x86, 0x3d, 0xcd, 0x25, + 0xa0, 0xdd, 0x0b, 0x56, 0x48, 0x17, 0x35, 0x6c, 0xde, 0x4b, 0xbd, 0x04, 0x1b, 0xd4, 0xda, 0x4b, + 0xcb, 0xe0, 0x9d, 0x49, 0xcb, 0x0e, 0xf5, 0x49, 0x6c, 0xff, 0x42, 0x21, 0xe3, 0xc5, 0xc0, 0xa5, + 0x72, 0xaf, 0x98, 0x52, 0xb9, 0xb3, 0x49, 0xa9, 0x5c, 0x4a, 0x55, 0x65, 0x08, 0xe4, 0xba, 0xcf, + 0x28, 0xd9, 0x75, 0x80, 0xe7, 0xef, 0xb5, 0xe0, 0x38, 0xd3, 0x7d, 0xd0, 0x06, 0xde, 0xb1, 0xbe, + 0x83, 0x99, 0xa4, 0x5e, 0xcd, 0x26, 0x87, 0xf3, 0xda, 0xb1, 0xeb, 0x70, 0xa6, 0xd3, 0xbd, 0xcb, + 0x2c, 0x7c, 0x6b, 0xca, 0x38, 0x22, 0xb6, 0xf0, 0xad, 0x2d, 0x2f, 0x60, 0x06, 0xe9, 0x36, 0x7c, + 0xa1, 0xfd, 0xdf, 0x2d, 0x28, 0x96, 0xfd, 0xda, 0x21, 0xbc, 0xe8, 0x3f, 0x62, 0xbc, 0xe8, 0x1f, + 0xca, 0xbe, 0xf1, 0x6b, 0xb9, 0xca, 0xbe, 0xc5, 0x84, 0xb2, 0xef, 0x54, 0x1e, 
0x81, 0xf6, 0xaa, + 0xbd, 0x9f, 0x2e, 0xc2, 0x50, 0xd9, 0xaf, 0xa9, 0x7d, 0xf6, 0xaf, 0xee, 0xc5, 0x91, 0x27, 0x37, + 0xfb, 0x94, 0x46, 0x99, 0x59, 0xf4, 0xca, 0xb8, 0x13, 0xdf, 0x66, 0xfe, 0x3c, 0x37, 0x89, 0xbb, + 0xb1, 0x19, 0x91, 0x5a, 0xf2, 0x73, 0x0e, 0xcf, 0x9f, 0xe7, 0x9b, 0x45, 0x18, 0x4b, 0xb4, 0x8e, + 0xea, 0x30, 0x52, 0xd7, 0x55, 0x49, 0x62, 0x9d, 0xde, 0x93, 0x16, 0x4a, 0xf8, 0x43, 0x68, 0x45, + 0xd8, 0x24, 0x8e, 0x66, 0x00, 0x3c, 0xdd, 0x2a, 0x5c, 0x05, 0x2a, 0xd6, 0x2c, 0xc2, 0x35, 0x0c, + 0xf4, 0x02, 0x0c, 0x45, 0x7e, 0xd3, 0xaf, 0xfb, 0x1b, 0x3b, 0x57, 0x88, 0x8c, 0x6c, 0xa9, 0x8c, + 0x86, 0xd7, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x1d, 0x98, 0x50, 0x44, 0x2a, 0xf7, 0x41, 0xbd, 0xc6, + 0xc4, 0x26, 0xab, 0x49, 0x8a, 0x38, 0xdd, 0x08, 0x7a, 0x09, 0x46, 0x99, 0xf5, 0x32, 0xab, 0x7f, + 0x85, 0xec, 0xc8, 0x88, 0xc7, 0x8c, 0xc3, 0x5e, 0x31, 0x20, 0x38, 0x81, 0x89, 0xe6, 0x61, 0xa2, + 0xe1, 0x86, 0x89, 0xea, 0x7d, 0xac, 0x3a, 0xeb, 0xc0, 0x4a, 0x12, 0x88, 0xd3, 0xf8, 0xf6, 0xcf, + 0x89, 0x39, 0xf6, 0x22, 0xf7, 0xfd, 0xed, 0xf8, 0xee, 0xde, 0x8e, 0xdf, 0xb0, 0x60, 0x9c, 0xb6, + 0xce, 0x4c, 0x32, 0x25, 0x23, 0xa5, 0x72, 0x62, 0x58, 0x6d, 0x72, 0x62, 0x9c, 0xa5, 0xc7, 0x76, + 0xcd, 0x6f, 0x45, 0x42, 0x3a, 0xaa, 0x9d, 0xcb, 0xb4, 0x14, 0x0b, 0xa8, 0xc0, 0x23, 0x41, 0x20, + 0xfc, 0xde, 0x75, 0x3c, 0x12, 0x04, 0x58, 0x40, 0x65, 0xca, 0x8c, 0x9e, 0xec, 0x94, 0x19, 0x3c, + 0xf2, 0xb9, 0xb0, 0x82, 0x13, 0x2c, 0xad, 0x16, 0xf9, 0x5c, 0x9a, 0xc7, 0xc5, 0x38, 0xf6, 0x57, + 0x8b, 0x30, 0x5c, 0xf6, 0x6b, 0xb1, 0x61, 0xc7, 0xf3, 0x86, 0x61, 0xc7, 0x99, 0x84, 0x61, 0xc7, + 0xb8, 0x8e, 0xfb, 0xbe, 0x19, 0xc7, 0xb7, 0xca, 0x8c, 0xe3, 0x37, 0x2d, 0x36, 0x6b, 0x0b, 0xab, + 0x15, 0x6e, 0xe1, 0x8b, 0x2e, 0xc0, 0x10, 0x3b, 0xe1, 0x58, 0xa0, 0x05, 0x69, 0xed, 0xc0, 0x52, + 0x58, 0xae, 0xc6, 0xc5, 0x58, 0xc7, 0x41, 0xe7, 0x60, 0x20, 0x24, 0x4e, 0x50, 0xdd, 0x54, 0xc7, + 0xbb, 0x30, 0x4d, 0xe0, 0x65, 0x58, 0x41, 0xd1, 0x1b, 0x71, 0xd0, 0xed, 0x62, 0xbe, 0xb9, 0xb0, + 0xde, 0x1f, 0xbe, 0x45, 0xf2, 0x23, 0x6d, 0xdb, 0x37, 0x01, 0xa5, 0xf1, 0xbb, 0xf0, 0xbf, 0x2a, + 0x99, 0x61, 0x61, 0x07, 0x53, 0x21, 0x61, 0xff, 0xda, 0x82, 0xd1, 0xb2, 0x5f, 0xa3, 0x5b, 0xf7, + 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x02, 0xbd, 0x65, 0xbf, 0xd6, + 0x21, 0x74, 0xed, 0x3f, 0xb0, 0xa0, 0xbf, 0xec, 0xd7, 0x0e, 0x41, 0xf1, 0xf2, 0x8a, 0xa9, 0x78, + 0x39, 0x9e, 0xb3, 0x6e, 0x72, 0x74, 0x2d, 0x7f, 0xaf, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x0d, 0x39, + 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x3b, 0x39, 0xad, + 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1a, 0x06, 0x9a, 0x01, 0xd9, 0x76, 0x7d, 0xc1, 0x5f, 0x6b, + 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5, 0xf7, + 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x9b, 0x30, 0xc8, 0xfe, + 0xb3, 0x63, 0xa7, 0xf7, 0xc0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a, 0x16, + 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b, 0x58, + 0xe8, 0x29, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xd5, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4, 0xeb, + 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x89, 0x44, 0x6a, 0xba, 0x22, + 0xe7, 0x05, 0xaf, 0xaa, 0x52, 0xac, 0x61, 0xa0, 0x4d, 0x38, 0xe9, 0x7a, 0x2c, 0x85, 0x14, 0xa9, + 0x6c, 0xb9, 0xcd, 0xb5, 0xab, 0x95, 0x1b, 0x24, 0x70, 0xd7, 0x77, 0xe6, 0x9c, 0xea, 0x16, 0xf1, + 0x64, 0x42, 0xfc, 0x47, 0x45, 0x17, 0x4f, 0x2e, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0xd9, 0xcf, 0xb1, + 0xf5, 
0x7e, 0xad, 0x82, 0x9e, 0x34, 0x8e, 0x8e, 0x63, 0xfa, 0xd1, 0xb1, 0xbf, 0x5b, 0xea, 0xbb, + 0x56, 0xd1, 0x62, 0xff, 0x5c, 0x84, 0xa3, 0x65, 0xbf, 0x56, 0xf6, 0x83, 0x68, 0xc9, 0x0f, 0x6e, + 0x3b, 0x41, 0x4d, 0x2e, 0xaf, 0x92, 0x8c, 0x7e, 0x44, 0xcf, 0xcf, 0x5e, 0x7e, 0xba, 0x18, 0x91, + 0x8d, 0x9e, 0x63, 0x1c, 0xdb, 0x01, 0x9d, 0x4d, 0xab, 0x8c, 0x77, 0x50, 0x49, 0xd8, 0x2e, 0x39, + 0x11, 0x41, 0xd7, 0x60, 0xa4, 0xaa, 0x5f, 0xa3, 0xa2, 0xfa, 0x13, 0xf2, 0x22, 0x33, 0xee, 0xd8, + 0xcc, 0x7b, 0xd7, 0xac, 0x6f, 0x7f, 0xdd, 0x12, 0xad, 0x70, 0x49, 0x04, 0xb7, 0x69, 0xed, 0x7c, + 0x9e, 0xce, 0xc3, 0x44, 0xa0, 0x57, 0xd1, 0x6c, 0xc3, 0x8e, 0xf2, 0xac, 0x36, 0x09, 0x20, 0x4e, + 0xe3, 0xa3, 0x8f, 0xc3, 0x09, 0xa3, 0x50, 0xaa, 0xc9, 0xb5, 0xdc, 0xd2, 0x4c, 0x56, 0x83, 0xf3, + 0x90, 0x70, 0x7e, 0x7d, 0xfb, 0xbb, 0xe1, 0x58, 0xf2, 0xbb, 0x84, 0xf4, 0xe4, 0x1e, 0xbf, 0xae, + 0x70, 0xb0, 0xaf, 0xb3, 0x5f, 0x80, 0x09, 0xfa, 0xac, 0x56, 0x2c, 0x22, 0x9b, 0xbf, 0xce, 0x01, + 0xa6, 0xfe, 0x5d, 0x3f, 0xbb, 0xe2, 0x12, 0x99, 0xd5, 0xd0, 0x27, 0x61, 0x34, 0x24, 0x2c, 0xaa, + 0x9a, 0x94, 0xda, 0xb5, 0xf1, 0x14, 0xaf, 0x2c, 0xea, 0x98, 0xfc, 0x65, 0x62, 0x96, 0xe1, 0x04, + 0x35, 0xd4, 0x80, 0xd1, 0xdb, 0xae, 0x57, 0xf3, 0x6f, 0x87, 0x92, 0xfe, 0x40, 0xbe, 0x0a, 0xe0, + 0x26, 0xc7, 0x4c, 0xf4, 0xd1, 0x68, 0xee, 0xa6, 0x41, 0x0c, 0x27, 0x88, 0xd3, 0x63, 0x24, 0x68, + 0x79, 0xb3, 0xe1, 0xf5, 0x90, 0x04, 0x22, 0xe6, 0x1b, 0x3b, 0x46, 0xb0, 0x2c, 0xc4, 0x31, 0x9c, + 0x1e, 0x23, 0xec, 0x0f, 0x73, 0x35, 0x67, 0xe7, 0x94, 0x38, 0x46, 0xb0, 0x2a, 0xc5, 0x1a, 0x06, + 0x3d, 0x66, 0xd9, 0xbf, 0x55, 0xdf, 0xc3, 0xbe, 0x1f, 0xc9, 0x83, 0x99, 0xa5, 0xa1, 0xd4, 0xca, + 0xb1, 0x81, 0x95, 0x13, 0x61, 0xae, 0xe7, 0xa0, 0x11, 0xe6, 0x50, 0xd4, 0xc6, 0xbb, 0x9e, 0x47, + 0x3a, 0xbe, 0xd8, 0xce, 0xbb, 0x7e, 0xff, 0x9e, 0x3c, 0xef, 0xe9, 0x3d, 0xbf, 0x2e, 0x06, 0xa8, + 0x97, 0x87, 0xd0, 0x63, 0x4a, 0xca, 0x0a, 0x1f, 0x1d, 0x09, 0x43, 0x8b, 0xd0, 0x1f, 0xee, 0x84, + 0xd5, 0xa8, 0x1e, 0xb6, 0x4b, 0x35, 0x5a, 0x61, 0x28, 0x5a, 0xa6, 0x6b, 0x5e, 0x05, 0xcb, 0xba, + 0xa8, 0x0a, 0x93, 0x82, 0xe2, 0xfc, 0xa6, 0xe3, 0xa9, 0x04, 0x88, 0xdc, 0x1a, 0xf1, 0xc2, 0xde, + 0x6e, 0x69, 0x52, 0xb4, 0xac, 0x83, 0xf7, 0x77, 0x4b, 0x74, 0x4b, 0x66, 0x40, 0x70, 0x16, 0x35, + 0xbe, 0xe4, 0xab, 0x55, 0xbf, 0xd1, 0x2c, 0x07, 0xfe, 0xba, 0x5b, 0x27, 0xed, 0x14, 0xbd, 0x15, + 0x03, 0x53, 0x2c, 0x79, 0xa3, 0x0c, 0x27, 0xa8, 0xa1, 0x5b, 0x30, 0xe6, 0x34, 0x9b, 0xb3, 0x41, + 0xc3, 0x0f, 0x64, 0x03, 0x43, 0xf9, 0x1a, 0x83, 0x59, 0x13, 0x95, 0xe7, 0x3f, 0x4c, 0x14, 0xe2, + 0x24, 0x41, 0xfb, 0xbb, 0x18, 0xbf, 0x5d, 0x71, 0x37, 0x3c, 0xe6, 0x93, 0x86, 0x1a, 0x30, 0xd2, + 0x64, 0x27, 0xb2, 0x48, 0x1b, 0x26, 0x76, 0xf1, 0xf3, 0x5d, 0xca, 0x0c, 0x6f, 0xb3, 0xc4, 0xa7, + 0x86, 0xed, 0x68, 0x59, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0xbf, 0x3f, 0xc1, 0x38, 0xb6, 0x0a, 0x17, + 0x04, 0xf6, 0x0b, 0x0f, 0x41, 0xf1, 0xf4, 0x9f, 0xce, 0x17, 0xb9, 0xc7, 0x53, 0x2f, 0xbc, 0x0c, + 0xb1, 0xac, 0x8b, 0x3e, 0x01, 0xa3, 0xf4, 0x25, 0xad, 0xb8, 0xa6, 0x70, 0xea, 0x48, 0x7e, 0xe8, + 0x29, 0x85, 0xa5, 0xa7, 0x14, 0xd4, 0x2b, 0xe3, 0x04, 0x31, 0xf4, 0x06, 0x33, 0xa7, 0x94, 0xa4, + 0x0b, 0xdd, 0x90, 0xd6, 0x2d, 0x27, 0x25, 0x59, 0x8d, 0x08, 0x6a, 0xc1, 0x64, 0x3a, 0x71, 0x72, + 0x38, 0x65, 0xe7, 0x3f, 0x49, 0xd2, 0xb9, 0x8f, 0xe3, 0xdc, 0x6f, 0x69, 0x58, 0x88, 0xb3, 0xe8, + 0xa3, 0xab, 0xc9, 0xb4, 0xb6, 0x45, 0x43, 0x58, 0x9f, 0x4a, 0x6d, 0x3b, 0xd2, 0x36, 0xa3, 0xed, + 0x06, 0x9c, 0xd2, 0x32, 0x83, 0x5e, 0x0a, 0x1c, 0x66, 0xce, 0xe3, 0xb2, 0x8b, 0x42, 0xe3, 0x25, + 0x1f, 0xde, 0xdb, 0x2d, 0x9d, 
0x5a, 0x6b, 0x87, 0x88, 0xdb, 0xd3, 0x41, 0xd7, 0xe0, 0x28, 0x0f, + 0x9c, 0xb2, 0x40, 0x9c, 0x5a, 0xdd, 0xf5, 0x14, 0xb3, 0xca, 0x8f, 0x95, 0x13, 0x7b, 0xbb, 0xa5, + 0xa3, 0xb3, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x7a, 0x05, 0x06, 0x6b, 0x9e, 0x3c, 0x00, 0xfb, 0x8c, + 0xe4, 0xab, 0x83, 0x0b, 0xab, 0x15, 0xf5, 0xfd, 0xf1, 0x1f, 0x1c, 0x57, 0x40, 0x1b, 0x5c, 0x5b, + 0xa4, 0x44, 0x7c, 0xfd, 0xa9, 0x78, 0x9a, 0x49, 0x29, 0xb8, 0x11, 0x89, 0x80, 0xab, 0x49, 0x95, + 0xa7, 0x9b, 0x11, 0xa4, 0xc0, 0x20, 0x8c, 0x5e, 0x07, 0x24, 0x92, 0xfc, 0xcc, 0x56, 0x59, 0x4e, + 0x3a, 0xcd, 0x84, 0x53, 0xbd, 0xdc, 0x2b, 0x29, 0x0c, 0x9c, 0x51, 0x0b, 0x5d, 0xa6, 0x27, 0x97, + 0x5e, 0x2a, 0x4e, 0x46, 0x95, 0xe2, 0x7b, 0x81, 0x34, 0x03, 0xc2, 0xac, 0x0e, 0x4d, 0x8a, 0x38, + 0x51, 0x0f, 0xd5, 0xe0, 0xa4, 0xd3, 0x8a, 0x7c, 0xa6, 0x88, 0x33, 0x51, 0xd7, 0xfc, 0x2d, 0xe2, + 0x31, 0x1d, 0xf8, 0x00, 0x8b, 0xd3, 0x79, 0x72, 0xb6, 0x0d, 0x1e, 0x6e, 0x4b, 0x85, 0xbe, 0x62, + 0xe8, 0x58, 0x68, 0x3a, 0x32, 0xc3, 0xa9, 0x9a, 0x2b, 0x8e, 0x25, 0x06, 0x7a, 0x01, 0x86, 0x36, + 0xfd, 0x30, 0x5a, 0x25, 0xd1, 0x6d, 0x3f, 0xd8, 0x12, 0x59, 0x05, 0xe2, 0x4c, 0x2e, 0x31, 0x08, + 0xeb, 0x78, 0xe8, 0x09, 0xe8, 0x67, 0x16, 0x5a, 0xcb, 0x0b, 0xec, 0x1a, 0x1c, 0x88, 0xcf, 0x98, + 0xcb, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0x5d, 0x2e, 0xcf, 0x33, 0x43, 0x97, 0x04, 0xea, 0x72, 0x79, + 0x1e, 0x4b, 0x38, 0x5d, 0xae, 0xe1, 0xa6, 0x13, 0x90, 0x72, 0xe0, 0x57, 0x49, 0xa8, 0xe5, 0x0f, + 0x7a, 0x88, 0xe7, 0x4c, 0xa0, 0xcb, 0xb5, 0x92, 0x85, 0x80, 0xb3, 0xeb, 0x21, 0x92, 0xce, 0x8a, + 0x3b, 0x9a, 0xaf, 0xa1, 0x4c, 0x73, 0x6a, 0x5d, 0x26, 0xc6, 0xf5, 0x60, 0x5c, 0xe5, 0xe3, 0xe5, + 0x59, 0x12, 0xc2, 0xa9, 0x31, 0xb6, 0xb6, 0xbb, 0x4f, 0xb1, 0xa0, 0x74, 0xbe, 0xcb, 0x09, 0x4a, + 0x38, 0x45, 0xdb, 0x08, 0x04, 0x3b, 0xde, 0x31, 0x10, 0xec, 0x79, 0x18, 0x0c, 0x5b, 0xb7, 0x6a, + 0x7e, 0xc3, 0x71, 0x3d, 0x66, 0xe8, 0xa2, 0xbd, 0x97, 0x2b, 0x12, 0x80, 0x63, 0x1c, 0xb4, 0x04, + 0x03, 0x8e, 0x54, 0xe8, 0xa2, 0xfc, 0x18, 0x77, 0x4a, 0x8d, 0xcb, 0xc3, 0x3e, 0x49, 0x15, 0xae, + 0xaa, 0x8b, 0x5e, 0x86, 0x11, 0x11, 0x47, 0x43, 0xa4, 0xb0, 0x9f, 0x34, 0xbd, 0x86, 0x2b, 0x3a, + 0x10, 0x9b, 0xb8, 0xe8, 0x3a, 0x0c, 0x45, 0x7e, 0x9d, 0xb9, 0xbe, 0x52, 0x06, 0xf6, 0x58, 0x7e, + 0x28, 0xda, 0x35, 0x85, 0xa6, 0xab, 0x1a, 0x54, 0x55, 0xac, 0xd3, 0x41, 0x6b, 0x7c, 0xbd, 0xb3, + 0x6c, 0x41, 0x24, 0x14, 0x39, 0xd0, 0x4f, 0xe5, 0x59, 0x29, 0x32, 0x34, 0x73, 0x3b, 0x88, 0x9a, + 0x58, 0x27, 0x83, 0x2e, 0xc1, 0x44, 0x33, 0x70, 0x7d, 0xb6, 0x26, 0x94, 0x82, 0x7a, 0xca, 0xcc, + 0x0d, 0x5a, 0x4e, 0x22, 0xe0, 0x74, 0x1d, 0x16, 0x06, 0x45, 0x14, 0x4e, 0x9d, 0xe0, 0xf9, 0xcd, + 0xb8, 0xf8, 0x81, 0x97, 0x61, 0x05, 0x45, 0x2b, 0xec, 0x24, 0xe6, 0x92, 0xb3, 0xa9, 0xe9, 0x7c, + 0xe7, 0x7a, 0x5d, 0xc2, 0xc6, 0xd9, 0x72, 0xf5, 0x17, 0xc7, 0x14, 0x50, 0x4d, 0x4b, 0x2b, 0x4e, + 0x1f, 0x37, 0xe1, 0xd4, 0xc9, 0x36, 0x66, 0xb2, 0x89, 0x97, 0x6c, 0xcc, 0x10, 0x18, 0xc5, 0x21, + 0x4e, 0xd0, 0x44, 0xaf, 0xc1, 0xb8, 0x88, 0x11, 0x10, 0x0f, 0xd3, 0xa9, 0xd8, 0x95, 0x08, 0x27, + 0x60, 0x38, 0x85, 0xcd, 0xf3, 0x8b, 0x39, 0xb7, 0xea, 0x44, 0x1c, 0x7d, 0x57, 0x5d, 0x6f, 0x2b, + 0x9c, 0x3a, 0xcd, 0xce, 0x07, 0x91, 0x5f, 0x2c, 0x09, 0xc5, 0x19, 0x35, 0xd0, 0x1a, 0x8c, 0x37, + 0x03, 0x42, 0x1a, 0xec, 0x09, 0x23, 0xee, 0xb3, 0x12, 0x8f, 0x02, 0x44, 0x7b, 0x52, 0x4e, 0xc0, + 0xf6, 0x33, 0xca, 0x70, 0x8a, 0x02, 0xba, 0x0d, 0x03, 0xfe, 0x36, 0x09, 0x36, 0x89, 0x53, 0x9b, + 0x3a, 0xd3, 0xc6, 0xc1, 0x4d, 0x5c, 0x6e, 0xd7, 0x04, 0x6e, 0xc2, 0xfe, 0x47, 0x16, 0x77, 0xb6, + 0xff, 0x91, 0x8d, 0xa1, 0xbf, 0x65, 0xc1, 0x09, 0xa9, 
0x51, 0xab, 0x34, 0xe9, 0xa8, 0xcf, 0xfb, + 0x5e, 0x18, 0x05, 0x3c, 0x6e, 0xcd, 0xc3, 0xf9, 0xb1, 0x5c, 0xd6, 0x72, 0x2a, 0x29, 0xe1, 0xfd, + 0x89, 0x3c, 0x8c, 0x10, 0xe7, 0xb7, 0x48, 0x1f, 0xdd, 0x21, 0x89, 0xe4, 0x61, 0x34, 0x1b, 0x2e, + 0xbd, 0xb1, 0xb0, 0x3a, 0xf5, 0x08, 0x0f, 0xba, 0x43, 0x37, 0x43, 0x25, 0x09, 0xc4, 0x69, 0x7c, + 0x74, 0x01, 0x0a, 0x7e, 0x38, 0xf5, 0x68, 0x9b, 0x4c, 0xf4, 0x7e, 0xed, 0x5a, 0x85, 0xdb, 0x81, + 0x5e, 0xab, 0xe0, 0x82, 0x1f, 0xca, 0x1c, 0x5f, 0xf4, 0xa5, 0x19, 0x4e, 0x3d, 0xc6, 0x45, 0xbd, + 0x32, 0xc7, 0x17, 0x2b, 0xc4, 0x31, 0x1c, 0x6d, 0xc2, 0x58, 0x68, 0xbc, 0xe8, 0xc3, 0xa9, 0xb3, + 0x6c, 0xa4, 0x1e, 0xcb, 0x9b, 0x34, 0x03, 0x5b, 0x4b, 0xbe, 0x63, 0x52, 0xc1, 0x49, 0xb2, 0x7c, + 0x77, 0x69, 0x32, 0x85, 0x70, 0xea, 0xf1, 0x0e, 0xbb, 0x4b, 0x43, 0xd6, 0x77, 0x97, 0x4e, 0x03, + 0x27, 0x68, 0x4e, 0x7f, 0x07, 0x4c, 0xa4, 0xd8, 0xa5, 0x83, 0xf8, 0x3c, 0x4c, 0x6f, 0xc1, 0x88, + 0xb1, 0x24, 0x1f, 0xa8, 0x49, 0xcc, 0xef, 0x0e, 0xc2, 0xa0, 0x32, 0x55, 0x40, 0xe7, 0x4d, 0x2b, + 0x98, 0x13, 0x49, 0x2b, 0x98, 0x81, 0xb2, 0x5f, 0x33, 0x0c, 0x5f, 0xd6, 0x32, 0x62, 0xc9, 0xe6, + 0x1d, 0x80, 0xdd, 0x3b, 0x66, 0x69, 0xea, 0x97, 0x62, 0xd7, 0xe6, 0x34, 0x3d, 0x6d, 0x35, 0x3a, + 0x97, 0x60, 0xc2, 0xf3, 0x19, 0x8f, 0x4e, 0x6a, 0x92, 0x01, 0x63, 0x7c, 0xd6, 0xa0, 0x1e, 0xeb, + 0x2c, 0x81, 0x80, 0xd3, 0x75, 0x68, 0x83, 0x9c, 0x51, 0x4a, 0xaa, 0x90, 0x38, 0x1f, 0x85, 0x05, + 0x94, 0xbe, 0x0d, 0xf9, 0xaf, 0x70, 0x6a, 0x3c, 0xff, 0x6d, 0xc8, 0x2b, 0x25, 0x99, 0xb1, 0x50, + 0x32, 0x63, 0x4c, 0x63, 0xd2, 0xf4, 0x6b, 0xcb, 0x65, 0xc1, 0xe6, 0x6b, 0x51, 0xde, 0x6b, 0xcb, + 0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x43, 0x26, 0x6f, 0x9b, 0x2e, 0x97, 0xb5, + 0x1c, 0xa3, 0xac, 0x02, 0x16, 0x15, 0x99, 0x44, 0x9c, 0xbe, 0x8d, 0x98, 0x44, 0xbc, 0xff, 0x1e, + 0x25, 0xe2, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0x3b, 0x70, 0xd4, 0x78, 0x8f, 0x2a, 0x4f, 0x35, 0xc8, + 0x57, 0x96, 0x27, 0x90, 0xe7, 0x4e, 0x89, 0x4e, 0x1f, 0x5d, 0xce, 0xa2, 0x84, 0xb3, 0x1b, 0x40, + 0x75, 0x98, 0xa8, 0xa6, 0x5a, 0x1d, 0xe8, 0xbe, 0x55, 0xb5, 0x2e, 0xd2, 0x2d, 0xa6, 0x09, 0xa3, + 0x97, 0x61, 0xe0, 0x6d, 0x9f, 0x1b, 0xb6, 0x89, 0xa7, 0x89, 0x8c, 0x92, 0x32, 0xf0, 0xc6, 0xb5, + 0x0a, 0x2b, 0xdf, 0xdf, 0x2d, 0x0d, 0x95, 0xfd, 0x9a, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0x03, 0x16, + 0x4c, 0xa7, 0x1f, 0xbc, 0xaa, 0xd3, 0x23, 0xdd, 0x77, 0xda, 0x16, 0x8d, 0x4e, 0x2f, 0xe6, 0x92, + 0xc3, 0x6d, 0x9a, 0x42, 0x1f, 0xa6, 0xfb, 0x29, 0x74, 0xef, 0x12, 0x91, 0xa0, 0xfd, 0xe1, 0x78, + 0x3f, 0xd1, 0xd2, 0xfd, 0xdd, 0xd2, 0x18, 0x3f, 0x19, 0xdd, 0xbb, 0x2a, 0x1e, 0x3d, 0xaf, 0x80, + 0xbe, 0x1b, 0x8e, 0x06, 0x69, 0xd9, 0x30, 0x91, 0x4c, 0xf8, 0x93, 0xdd, 0x9c, 0xb2, 0xc9, 0x09, + 0xc7, 0x59, 0x04, 0x71, 0x76, 0x3b, 0xf6, 0xaf, 0x59, 0x4c, 0x27, 0x20, 0xba, 0x45, 0xc2, 0x56, + 0x3d, 0x3a, 0x04, 0x63, 0xb2, 0x45, 0x43, 0xdf, 0x7e, 0xcf, 0xd6, 0x60, 0xff, 0xd2, 0x62, 0xd6, + 0x60, 0x87, 0xe8, 0xd7, 0xf6, 0x06, 0x0c, 0x44, 0xa2, 0x35, 0xd1, 0xf5, 0x3c, 0xcb, 0x15, 0xd9, + 0x29, 0x66, 0x11, 0xa7, 0x1e, 0x39, 0xb2, 0x14, 0x2b, 0x32, 0xf6, 0x3f, 0xe3, 0x33, 0x20, 0x21, + 0x87, 0xa0, 0xd6, 0x5c, 0x30, 0xd5, 0x9a, 0xa5, 0x0e, 0x5f, 0x90, 0xa3, 0xde, 0xfc, 0xa7, 0x66, + 0xbf, 0x99, 0x70, 0xef, 0xdd, 0x6e, 0x86, 0x68, 0x7f, 0xde, 0x02, 0x88, 0x13, 0x80, 0x74, 0x91, + 0x90, 0xf9, 0x22, 0x7d, 0xd6, 0xf8, 0x91, 0x5f, 0xf5, 0xeb, 0x42, 0xf5, 0x72, 0x32, 0xd6, 0xac, + 0xf2, 0xf2, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xa8, 0x24, 0x23, 0xf2, 0x16, 0x63, 0x5d, 0xbf, 0x11, + 0x8d, 0xf7, 0x4b, 0x16, 0x1c, 0xc9, 0x72, 0x92, 0xa0, 0x8f, 0x64, 0x2e, 0xe6, 
0x54, 0x26, 0xa2, + 0x6a, 0x36, 0x6f, 0x88, 0x72, 0xac, 0x30, 0xba, 0xce, 0x74, 0x7d, 0xb0, 0xe4, 0x14, 0xd7, 0x60, + 0xa4, 0x1c, 0x10, 0x8d, 0xbf, 0x78, 0x35, 0xce, 0x9b, 0x33, 0x38, 0xf7, 0xf4, 0x81, 0x23, 0x0f, + 0xd9, 0x5f, 0x2e, 0xc0, 0x11, 0x6e, 0xe8, 0x34, 0xbb, 0xed, 0xbb, 0xb5, 0xb2, 0x5f, 0x13, 0xae, + 0xad, 0x6f, 0xc2, 0x70, 0x53, 0x93, 0x4d, 0xb7, 0x0b, 0xb4, 0xae, 0xcb, 0xb0, 0x63, 0x69, 0x9a, + 0x5e, 0x8a, 0x0d, 0x5a, 0xa8, 0x06, 0xc3, 0x64, 0xdb, 0xad, 0x2a, 0x6b, 0x99, 0xc2, 0x81, 0x2f, + 0x69, 0xd5, 0xca, 0xa2, 0x46, 0x07, 0x1b, 0x54, 0xbb, 0x36, 0x4f, 0xd6, 0x58, 0xb4, 0x9e, 0x0e, + 0x16, 0x32, 0x3f, 0x6e, 0xc1, 0xf1, 0x9c, 0xb0, 0xec, 0xb4, 0xb9, 0xdb, 0xcc, 0xa4, 0x4c, 0x2c, + 0x5b, 0xd5, 0x1c, 0x37, 0x34, 0xc3, 0x02, 0x8a, 0x3e, 0x0a, 0xd0, 0x8c, 0x53, 0x52, 0x76, 0x88, + 0x5f, 0x6d, 0x44, 0xb2, 0xd5, 0x82, 0x92, 0xaa, 0xcc, 0x95, 0x1a, 0x2d, 0xfb, 0x4b, 0x3d, 0xd0, + 0xcb, 0x0c, 0x93, 0x50, 0x19, 0xfa, 0x37, 0x79, 0xcc, 0xbc, 0xb6, 0xf3, 0x46, 0x71, 0x65, 0x10, + 0xbe, 0x78, 0xde, 0xb4, 0x52, 0x2c, 0xc9, 0xa0, 0x15, 0x98, 0xe4, 0xe9, 0x36, 0xeb, 0x0b, 0xa4, + 0xee, 0xec, 0x48, 0xb1, 0x6f, 0x81, 0x7d, 0xaa, 0x12, 0x7f, 0x2f, 0xa7, 0x51, 0x70, 0x56, 0x3d, + 0xf4, 0x2a, 0x8c, 0xd2, 0x67, 0xb8, 0xdf, 0x8a, 0x24, 0x25, 0x9e, 0xdf, 0x52, 0xbd, 0x4c, 0xd6, + 0x0c, 0x28, 0x4e, 0x60, 0xa3, 0x97, 0x61, 0xa4, 0x99, 0x12, 0x70, 0xf7, 0xc6, 0x92, 0x20, 0x53, + 0xa8, 0x6d, 0xe2, 0x32, 0x3f, 0x89, 0x16, 0xf3, 0x0a, 0x59, 0xdb, 0x0c, 0x48, 0xb8, 0xe9, 0xd7, + 0x6b, 0x8c, 0x03, 0xee, 0xd5, 0xfc, 0x24, 0x12, 0x70, 0x9c, 0xaa, 0x41, 0xa9, 0xac, 0x3b, 0x6e, + 0xbd, 0x15, 0x90, 0x98, 0x4a, 0x9f, 0x49, 0x65, 0x29, 0x01, 0xc7, 0xa9, 0x1a, 0x9d, 0x25, 0xf7, + 0xfd, 0xf7, 0x47, 0x72, 0x6f, 0xff, 0x4c, 0x01, 0x8c, 0xa9, 0x7d, 0x0f, 0xe7, 0xdd, 0x7c, 0x05, + 0x7a, 0x36, 0x82, 0x66, 0x55, 0x18, 0xe1, 0x65, 0x7e, 0x59, 0x9c, 0xfd, 0x9f, 0x7f, 0x19, 0xfd, + 0x8f, 0x59, 0x2d, 0xba, 0xc7, 0x8f, 0x96, 0x03, 0x9f, 0x5e, 0x72, 0x32, 0xac, 0xa6, 0x72, 0x47, + 0xea, 0x97, 0x81, 0x35, 0xda, 0x04, 0xa0, 0x16, 0x3e, 0x15, 0x9c, 0x82, 0x61, 0xaf, 0x56, 0x11, + 0xe1, 0x73, 0x24, 0x15, 0x74, 0x01, 0x86, 0x44, 0x2a, 0x44, 0xe6, 0x35, 0xc3, 0x37, 0x13, 0xb3, + 0xaf, 0x5b, 0x88, 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x60, 0x01, 0x26, 0x33, 0xdc, 0x1e, 0xf9, 0x35, + 0xb2, 0xe1, 0x86, 0x51, 0xb0, 0x93, 0xbc, 0x9c, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x9e, 0x55, 0xfc, + 0xa2, 0x4a, 0x5e, 0x4e, 0xc2, 0xad, 0x48, 0x40, 0x0f, 0x98, 0xaa, 0xff, 0x0c, 0xf4, 0xb4, 0x42, + 0x22, 0x63, 0xdd, 0xab, 0x6b, 0x9b, 0x29, 0xec, 0x19, 0x84, 0x3e, 0x01, 0x37, 0x94, 0x16, 0x5a, + 0x7b, 0x02, 0x72, 0x3d, 0x34, 0x87, 0xd1, 0xce, 0x45, 0xc4, 0x73, 0xbc, 0x48, 0x3c, 0x14, 0xe3, + 0x18, 0xc8, 0xac, 0x14, 0x0b, 0xa8, 0xfd, 0xc5, 0x22, 0x9c, 0xc8, 0x75, 0x84, 0xa6, 0x5d, 0x6f, + 0xf8, 0x9e, 0x1b, 0xf9, 0xca, 0x70, 0x91, 0xc7, 0x3d, 0x26, 0xcd, 0xcd, 0x15, 0x51, 0x8e, 0x15, + 0x06, 0x3a, 0x0b, 0xbd, 0x4c, 0x28, 0x9e, 0x4c, 0x83, 0x86, 0xe7, 0x16, 0x78, 0x44, 0x49, 0x0e, + 0xd6, 0x6e, 0xf5, 0x62, 0xdb, 0x5b, 0xfd, 0x11, 0xca, 0xc1, 0xf8, 0xf5, 0xe4, 0x85, 0x42, 0xbb, + 0xeb, 0xfb, 0x75, 0xcc, 0x80, 0xe8, 0x31, 0x31, 0x5e, 0x09, 0x4b, 0x3d, 0xec, 0xd4, 0xfc, 0x50, + 0x1b, 0xb4, 0x27, 0xa0, 0x7f, 0x8b, 0xec, 0x04, 0xae, 0xb7, 0x91, 0xb4, 0xe0, 0xbc, 0xc2, 0x8b, + 0xb1, 0x84, 0x9b, 0x59, 0xbf, 0xfb, 0xef, 0x47, 0xd6, 0x6f, 0x7d, 0x05, 0x0c, 0x74, 0x64, 0x4f, + 0x7e, 0xa8, 0x08, 0x63, 0x78, 0x6e, 0xe1, 0xfd, 0x89, 0xb8, 0x9e, 0x9e, 0x88, 0xfb, 0x91, 0x1c, + 0xfb, 0x60, 0xb3, 0xf1, 0xcb, 0x16, 0x8c, 0xb1, 0x84, 0x8c, 0x22, 0x8a, 0x89, 0xeb, 0x7b, 0x87, + 0xf0, 
0x14, 0x78, 0x04, 0x7a, 0x03, 0xda, 0xa8, 0x98, 0x41, 0xb5, 0xc7, 0x59, 0x4f, 0x30, 0x87, + 0xa1, 0x93, 0xd0, 0xc3, 0xba, 0x40, 0x27, 0x6f, 0x98, 0x1f, 0xc1, 0x0b, 0x4e, 0xe4, 0x60, 0x56, + 0xca, 0xe2, 0x29, 0x62, 0xd2, 0xac, 0xbb, 0xbc, 0xd3, 0xb1, 0xc9, 0xc2, 0xbb, 0x23, 0x44, 0x4a, + 0x66, 0xd7, 0xde, 0x59, 0x3c, 0xc5, 0x6c, 0x92, 0xed, 0x9f, 0xd9, 0x7f, 0x5e, 0x80, 0xd3, 0x99, + 0xf5, 0xba, 0x8e, 0xa7, 0xd8, 0xbe, 0xf6, 0x83, 0x4c, 0xdf, 0x56, 0x3c, 0x44, 0xfb, 0xf8, 0x9e, + 0x6e, 0xb9, 0xff, 0xde, 0x2e, 0xc2, 0x1c, 0x66, 0x0e, 0xd9, 0xbb, 0x24, 0xcc, 0x61, 0x66, 0xdf, + 0x72, 0xc4, 0x04, 0x7f, 0x53, 0xc8, 0xf9, 0x16, 0x26, 0x30, 0x38, 0x47, 0xcf, 0x19, 0x06, 0x0c, + 0xe5, 0x23, 0x9c, 0x9f, 0x31, 0xbc, 0x0c, 0x2b, 0x28, 0x9a, 0x85, 0xb1, 0x86, 0xeb, 0xd1, 0xc3, + 0x67, 0xc7, 0x64, 0xc5, 0x95, 0x2e, 0x63, 0xc5, 0x04, 0xe3, 0x24, 0x3e, 0x72, 0xb5, 0x10, 0x88, + 0xfc, 0xeb, 0x5e, 0x3e, 0xd0, 0xae, 0x9b, 0x31, 0xcd, 0x39, 0xd4, 0x28, 0x66, 0x84, 0x43, 0x5c, + 0xd1, 0xe4, 0x44, 0xc5, 0xee, 0xe5, 0x44, 0xc3, 0xd9, 0x32, 0xa2, 0xe9, 0x97, 0x61, 0xe4, 0x9e, + 0x75, 0x23, 0xf6, 0x37, 0x8a, 0xf0, 0x50, 0x9b, 0x6d, 0xcf, 0xcf, 0x7a, 0x63, 0x0e, 0xb4, 0xb3, + 0x3e, 0x35, 0x0f, 0x65, 0x38, 0xb2, 0xde, 0xaa, 0xd7, 0x77, 0x98, 0x23, 0x18, 0xa9, 0x49, 0x0c, + 0xc1, 0x53, 0x4a, 0xe1, 0xc8, 0x91, 0xa5, 0x0c, 0x1c, 0x9c, 0x59, 0x93, 0x3e, 0xb1, 0xe8, 0x4d, + 0xb2, 0xa3, 0x48, 0x25, 0x9e, 0x58, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x82, 0x09, 0x67, 0xdb, + 0x71, 0x79, 0xfa, 0x0b, 0x49, 0x80, 0xbf, 0xb1, 0x94, 0x2c, 0x7a, 0x36, 0x89, 0x80, 0xd3, 0x75, + 0xd0, 0xeb, 0x80, 0xfc, 0x5b, 0xcc, 0xb9, 0xa4, 0x76, 0x89, 0x78, 0x42, 0xeb, 0xce, 0xe6, 0xae, + 0x18, 0x1f, 0x09, 0xd7, 0x52, 0x18, 0x38, 0xa3, 0x56, 0x22, 0x18, 0x5f, 0x5f, 0x7e, 0x30, 0xbe, + 0xf6, 0xe7, 0x62, 0xc7, 0xcc, 0x81, 0x6f, 0xc1, 0xc8, 0x41, 0x2d, 0xa6, 0x9f, 0x80, 0xfe, 0x40, + 0xe4, 0x64, 0x4f, 0x78, 0x5d, 0xcb, 0x8c, 0xd5, 0x12, 0x6e, 0xff, 0x17, 0x0b, 0x94, 0x2c, 0xd9, + 0x8c, 0xbb, 0xfd, 0x32, 0x33, 0xff, 0xe6, 0x52, 0x70, 0x2d, 0xd4, 0xd6, 0x51, 0xcd, 0xfc, 0x3b, + 0x06, 0x62, 0x13, 0x97, 0x2f, 0xb7, 0x30, 0x8e, 0xf0, 0x60, 0x3c, 0x20, 0x44, 0x58, 0x50, 0x85, + 0x81, 0x3e, 0x06, 0xfd, 0x35, 0x77, 0xdb, 0x0d, 0x85, 0x1c, 0xed, 0xc0, 0x7a, 0xbb, 0xf8, 0xfb, + 0x16, 0x38, 0x19, 0x2c, 0xe9, 0xd9, 0x3f, 0x62, 0x81, 0xd2, 0x4b, 0x5e, 0x26, 0x4e, 0x3d, 0xda, + 0x44, 0xaf, 0x01, 0x48, 0x0a, 0x4a, 0xf6, 0x26, 0xad, 0xa5, 0x00, 0x2b, 0xc8, 0xbe, 0xf1, 0x0f, + 0x6b, 0x75, 0xd0, 0xab, 0xd0, 0xb7, 0xc9, 0x68, 0x89, 0x6f, 0x3b, 0xab, 0x54, 0x5d, 0xac, 0x74, + 0x7f, 0xb7, 0x74, 0xc4, 0x6c, 0x53, 0xde, 0x62, 0xbc, 0x96, 0xfd, 0x43, 0x85, 0x78, 0x4e, 0xdf, + 0x68, 0xf9, 0x91, 0x73, 0x08, 0x9c, 0xc8, 0x25, 0x83, 0x13, 0x79, 0x2c, 0x7b, 0xa1, 0x6a, 0x5d, + 0xca, 0xe5, 0x40, 0xae, 0x25, 0x38, 0x90, 0xc7, 0x3b, 0x93, 0x6a, 0xcf, 0x79, 0xfc, 0x73, 0x0b, + 0x26, 0x0c, 0xfc, 0x43, 0xb8, 0x00, 0x97, 0xcc, 0x0b, 0xf0, 0xe1, 0x8e, 0xdf, 0x90, 0x73, 0xf1, + 0x7d, 0x7f, 0x31, 0xd1, 0x77, 0x76, 0xe1, 0xbd, 0x0d, 0x3d, 0x9b, 0x4e, 0x50, 0x13, 0xef, 0xfa, + 0xf3, 0x5d, 0x8d, 0xf5, 0xcc, 0x65, 0x27, 0x10, 0x96, 0x16, 0x4f, 0xcb, 0x51, 0xa7, 0x45, 0x1d, + 0xad, 0x2c, 0x58, 0x53, 0xe8, 0x22, 0xf4, 0x85, 0x55, 0xbf, 0xa9, 0xfc, 0xe4, 0x58, 0x3a, 0xed, + 0x0a, 0x2b, 0xd9, 0xdf, 0x2d, 0x21, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0x3e, 0x7a, 0x13, 0x46, 0xd8, + 0x2f, 0x65, 0xf6, 0x58, 0xcc, 0x97, 0xc0, 0x54, 0x74, 0x44, 0x6e, 0x13, 0x6c, 0x14, 0x61, 0x93, + 0xd4, 0xf4, 0x06, 0x0c, 0xaa, 0xcf, 0x7a, 0xa0, 0xda, 0xfa, 0xff, 0x58, 0x84, 0xc9, 0x8c, 0x35, + 0x87, 0x42, 0x63, 0x26, 0x2e, 
0x74, 0xb9, 0x54, 0xdf, 0xe1, 0x5c, 0x84, 0xec, 0x01, 0x58, 0x13, + 0x6b, 0xab, 0xeb, 0x46, 0xaf, 0x87, 0x24, 0xd9, 0x28, 0x2d, 0xea, 0xdc, 0x28, 0x6d, 0xec, 0xd0, + 0x86, 0x9a, 0x36, 0xa4, 0x7a, 0xfa, 0x40, 0xe7, 0xf4, 0x37, 0x7b, 0xe0, 0x48, 0x56, 0x0c, 0x69, + 0xf4, 0x19, 0x10, 0xe9, 0xff, 0xc5, 0xb4, 0x3e, 0xdf, 0x6e, 0x84, 0xf5, 0x9a, 0x33, 0xcc, 0x17, + 0x4c, 0x84, 0x6e, 0x9d, 0x91, 0xc7, 0x11, 0x2f, 0xec, 0x38, 0xcc, 0xa2, 0x4d, 0x16, 0x52, 0x49, + 0xdc, 0x9e, 0xf2, 0xf8, 0xf8, 0x50, 0xd7, 0x1d, 0x10, 0xf7, 0x6f, 0x98, 0x30, 0xa9, 0x92, 0xc5, + 0x9d, 0x4d, 0xaa, 0x64, 0xcb, 0x68, 0x19, 0xfa, 0xaa, 0xdc, 0x56, 0xa7, 0xd8, 0xf9, 0x08, 0xe3, + 0x86, 0x3a, 0xea, 0x00, 0x16, 0x06, 0x3a, 0x82, 0xc0, 0xb4, 0x0b, 0x43, 0xda, 0xc0, 0x3c, 0xd0, + 0xc5, 0xb3, 0x45, 0x2f, 0x3e, 0x6d, 0x08, 0x1e, 0xe8, 0x02, 0xfa, 0x31, 0xed, 0xee, 0x17, 0xe7, + 0xc1, 0x07, 0x0d, 0xde, 0xe9, 0x64, 0xc2, 0x05, 0x2f, 0xb1, 0xaf, 0x18, 0x2f, 0x55, 0x31, 0x63, + 0x9e, 0xe7, 0xa6, 0x4e, 0x32, 0x2f, 0xfc, 0xf6, 0x71, 0xce, 0xed, 0x1f, 0xb7, 0x20, 0xe1, 0x24, + 0xa5, 0xc4, 0x9d, 0x56, 0xae, 0xb8, 0xf3, 0x0c, 0xf4, 0x04, 0x7e, 0x9d, 0x24, 0x53, 0xd3, 0x63, + 0xbf, 0x4e, 0x30, 0x83, 0x50, 0x8c, 0x28, 0x16, 0x62, 0x0d, 0xeb, 0x0f, 0x74, 0xf1, 0xf4, 0x7e, + 0x04, 0x7a, 0xeb, 0x64, 0x9b, 0xd4, 0x93, 0x19, 0x44, 0xaf, 0xd2, 0x42, 0xcc, 0x61, 0xf6, 0x2f, + 0xf7, 0xc0, 0xa9, 0xb6, 0x91, 0xd7, 0x28, 0x83, 0xb9, 0xe1, 0x44, 0xe4, 0xb6, 0xb3, 0x93, 0xcc, + 0x9c, 0x77, 0x89, 0x17, 0x63, 0x09, 0x67, 0xce, 0xc8, 0x3c, 0x93, 0x4c, 0x42, 0x38, 0x2c, 0x12, + 0xc8, 0x08, 0xa8, 0x29, 0x6c, 0x2c, 0xde, 0x0f, 0x61, 0xe3, 0xb3, 0x00, 0x61, 0x58, 0xe7, 0x06, + 0x97, 0x35, 0xe1, 0xe5, 0x1c, 0x67, 0x1c, 0xaa, 0x5c, 0x15, 0x10, 0xac, 0x61, 0xa1, 0x05, 0x18, + 0x6f, 0x06, 0x7e, 0xc4, 0x65, 0xed, 0x0b, 0xdc, 0x26, 0xb9, 0xd7, 0x0c, 0x7a, 0x55, 0x4e, 0xc0, + 0x71, 0xaa, 0x06, 0x7a, 0x01, 0x86, 0x44, 0x20, 0xac, 0xb2, 0xef, 0xd7, 0x85, 0x78, 0x4f, 0x99, + 0xe9, 0x56, 0x62, 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0x4c, 0x80, 0xdf, 0x9f, 0x59, 0x8d, 0x0b, 0xf1, + 0x35, 0xbc, 0x44, 0xd0, 0xfc, 0x81, 0xae, 0x82, 0xe6, 0xc7, 0x02, 0xcf, 0xc1, 0xae, 0xf5, 0xc9, + 0xd0, 0x51, 0x44, 0xf8, 0x95, 0x1e, 0x98, 0x14, 0x0b, 0xe7, 0x41, 0x2f, 0x97, 0xeb, 0xe9, 0xe5, + 0x72, 0x3f, 0x44, 0xa2, 0xef, 0xaf, 0x99, 0xc3, 0x5e, 0x33, 0x3f, 0x6c, 0x81, 0xc9, 0x43, 0xa2, + 0xff, 0x2f, 0x37, 0xf5, 0xe8, 0x0b, 0xb9, 0x3c, 0x69, 0x1c, 0x51, 0xfb, 0x9d, 0x25, 0x21, 0xb5, + 0xff, 0x93, 0x05, 0x0f, 0x77, 0xa4, 0x88, 0x16, 0x61, 0x90, 0x31, 0xba, 0xda, 0xbb, 0xf8, 0x71, + 0xe5, 0xb3, 0x20, 0x01, 0x39, 0x7c, 0x77, 0x5c, 0x13, 0x2d, 0xa6, 0x72, 0xbc, 0x3e, 0x91, 0x91, + 0xe3, 0xf5, 0xa8, 0x31, 0x3c, 0xf7, 0x98, 0xe4, 0xf5, 0x0b, 0xf4, 0xc6, 0x31, 0x7d, 0x12, 0x3f, + 0x64, 0x88, 0x73, 0xed, 0x84, 0x38, 0x17, 0x99, 0xd8, 0xda, 0x1d, 0xf2, 0x1a, 0x8c, 0xb3, 0x08, + 0x99, 0xcc, 0x83, 0x46, 0x38, 0x33, 0x16, 0x62, 0x2b, 0xf9, 0xab, 0x09, 0x18, 0x4e, 0x61, 0xdb, + 0x7f, 0x5a, 0x84, 0x3e, 0xbe, 0xfd, 0x0e, 0xe1, 0xe1, 0xfb, 0x14, 0x0c, 0xba, 0x8d, 0x46, 0x8b, + 0xa7, 0xed, 0xec, 0x8d, 0x6d, 0xae, 0x97, 0x65, 0x21, 0x8e, 0xe1, 0x68, 0x49, 0x68, 0x12, 0xda, + 0x04, 0xe1, 0xe6, 0x1d, 0x9f, 0x59, 0x70, 0x22, 0x87, 0x73, 0x71, 0xea, 0x9e, 0x8d, 0x75, 0x0e, + 0xe8, 0x93, 0x00, 0x61, 0x14, 0xb8, 0xde, 0x06, 0x2d, 0x13, 0x99, 0x1a, 0x9e, 0x6c, 0x43, 0xad, + 0xa2, 0x90, 0x39, 0xcd, 0xf8, 0xcc, 0x51, 0x00, 0xac, 0x51, 0x44, 0x33, 0xc6, 0x4d, 0x3f, 0x9d, + 0x98, 0x3b, 0xe0, 0x54, 0xe3, 0x39, 0x9b, 0x7e, 0x11, 0x06, 0x15, 0xf1, 0x4e, 0x72, 0xc5, 0x61, + 0x9d, 0x61, 0xfb, 0x08, 0x8c, 0x25, 0xfa, 0x76, 0x20, 
0xb1, 0xe4, 0xaf, 0x58, 0x30, 0xc6, 0x3b, + 0xb3, 0xe8, 0x6d, 0x8b, 0xdb, 0xe0, 0x2e, 0x1c, 0xa9, 0x67, 0x9c, 0xca, 0x62, 0xfa, 0xbb, 0x3f, + 0xc5, 0x95, 0x18, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0xe7, 0xe8, 0x8e, 0xa3, 0xa7, 0xae, 0x53, + 0x17, 0xd1, 0x36, 0x86, 0xf9, 0x6e, 0xe3, 0x65, 0x58, 0x41, 0xed, 0x3f, 0xb4, 0x60, 0x82, 0xf7, + 0xfc, 0x0a, 0xd9, 0x51, 0x67, 0xd3, 0xb7, 0xb2, 0xef, 0x22, 0x61, 0x74, 0x21, 0x27, 0x61, 0xb4, + 0xfe, 0x69, 0xc5, 0xb6, 0x9f, 0xf6, 0x65, 0x0b, 0xc4, 0x0a, 0x39, 0x04, 0x49, 0xcb, 0x77, 0x98, + 0x92, 0x96, 0xe9, 0xfc, 0x4d, 0x90, 0x23, 0x62, 0xf9, 0x6b, 0x0b, 0xc6, 0x39, 0x42, 0x6c, 0x05, + 0xf1, 0x2d, 0x9d, 0x87, 0x39, 0xf3, 0x8b, 0x32, 0xcd, 0x5a, 0xaf, 0x90, 0x9d, 0x35, 0xbf, 0xec, + 0x44, 0x9b, 0xd9, 0x1f, 0x65, 0x4c, 0x56, 0x4f, 0xdb, 0xc9, 0xaa, 0xc9, 0x0d, 0x64, 0x24, 0x26, + 0xec, 0x20, 0x00, 0x3e, 0x68, 0x62, 0x42, 0xfb, 0xcf, 0x2c, 0x40, 0xbc, 0x19, 0x83, 0x71, 0xa3, + 0xec, 0x10, 0x2b, 0xd5, 0x2e, 0xba, 0xf8, 0x68, 0x52, 0x10, 0xac, 0x61, 0xdd, 0x97, 0xe1, 0x49, + 0x98, 0xb2, 0x14, 0x3b, 0x9b, 0xb2, 0x1c, 0x60, 0x44, 0xbf, 0xdc, 0x0f, 0x49, 0x9f, 0x49, 0x74, + 0x03, 0x86, 0xab, 0x4e, 0xd3, 0xb9, 0xe5, 0xd6, 0xdd, 0xc8, 0x25, 0x61, 0x3b, 0x3b, 0xb7, 0x79, + 0x0d, 0x4f, 0x18, 0x1f, 0x68, 0x25, 0xd8, 0xa0, 0x83, 0x66, 0x00, 0x9a, 0x81, 0xbb, 0xed, 0xd6, + 0xc9, 0x06, 0x13, 0x08, 0xb1, 0xf8, 0x3e, 0xdc, 0xe8, 0x4e, 0x96, 0x62, 0x0d, 0x23, 0x23, 0xf4, + 0x46, 0xf1, 0x01, 0x87, 0xde, 0x80, 0x43, 0x0b, 0xbd, 0xd1, 0x73, 0xa0, 0xd0, 0x1b, 0x03, 0x07, + 0x0e, 0xbd, 0xd1, 0xdb, 0x55, 0xe8, 0x0d, 0x0c, 0xc7, 0x24, 0xef, 0x49, 0xff, 0x2f, 0xb9, 0x75, + 0x22, 0x1e, 0x1c, 0x3c, 0x28, 0xd1, 0xf4, 0xde, 0x6e, 0xe9, 0x18, 0xce, 0xc4, 0xc0, 0x39, 0x35, + 0xd1, 0x47, 0x61, 0xca, 0xa9, 0xd7, 0xfd, 0xdb, 0x6a, 0x52, 0x17, 0xc3, 0xaa, 0x53, 0xe7, 0xca, + 0xa5, 0x7e, 0x46, 0xf5, 0xe4, 0xde, 0x6e, 0x69, 0x6a, 0x36, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0xaf, + 0xc0, 0x60, 0x33, 0xf0, 0xab, 0x2b, 0x9a, 0x63, 0xf7, 0x69, 0x3a, 0x80, 0x65, 0x59, 0xb8, 0xbf, + 0x5b, 0x1a, 0x51, 0x7f, 0xd8, 0x85, 0x1f, 0x57, 0xc8, 0x88, 0x6a, 0x31, 0xf4, 0xa0, 0xa3, 0x5a, + 0x0c, 0xdf, 0xef, 0xa8, 0x16, 0x5b, 0x30, 0x59, 0x21, 0x81, 0xeb, 0xd4, 0xdd, 0xbb, 0x94, 0x27, + 0x97, 0x67, 0xe0, 0x1a, 0x0c, 0x06, 0x89, 0x53, 0xbf, 0xab, 0xe0, 0xdb, 0x9a, 0x5c, 0x46, 0x9e, + 0xf2, 0x31, 0x21, 0xfb, 0x7f, 0x5b, 0xd0, 0x2f, 0xfc, 0x30, 0x0f, 0x81, 0x33, 0x9d, 0x35, 0x54, + 0x32, 0xa5, 0xec, 0x49, 0x61, 0x9d, 0xc9, 0x55, 0xc6, 0x2c, 0x27, 0x94, 0x31, 0x0f, 0xb7, 0x23, + 0xd2, 0x5e, 0x0d, 0xf3, 0x77, 0x8b, 0xf4, 0x85, 0x60, 0x44, 0x04, 0x78, 0xf0, 0x43, 0xb0, 0x0a, + 0xfd, 0xa1, 0xf0, 0x48, 0x2f, 0xe4, 0xfb, 0xf2, 0x24, 0x27, 0x31, 0xb6, 0x81, 0x14, 0x3e, 0xe8, + 0x92, 0x48, 0xa6, 0xab, 0x7b, 0xf1, 0x01, 0xba, 0xba, 0x77, 0x8a, 0x99, 0xd0, 0x73, 0x3f, 0x62, + 0x26, 0xd8, 0x5f, 0x63, 0xb7, 0xb3, 0x5e, 0x7e, 0x08, 0x8c, 0xdb, 0x25, 0xf3, 0x1e, 0xb7, 0xdb, + 0xac, 0x2c, 0xd1, 0xa9, 0x1c, 0x06, 0xee, 0x97, 0x2c, 0x38, 0x95, 0xf1, 0x55, 0x1a, 0x37, 0xf7, + 0x34, 0x0c, 0x38, 0xad, 0x9a, 0xab, 0xf6, 0xb2, 0xa6, 0x2d, 0x9e, 0x15, 0xe5, 0x58, 0x61, 0xa0, + 0x79, 0x98, 0x20, 0x77, 0x9a, 0x2e, 0x57, 0xc3, 0xeb, 0xa6, 0xe3, 0x45, 0xee, 0xbc, 0xbb, 0x98, + 0x04, 0xe2, 0x34, 0xbe, 0x0a, 0x89, 0x56, 0xcc, 0x0d, 0x89, 0xf6, 0x0b, 0x16, 0x0c, 0x29, 0x9f, + 0xec, 0x07, 0x3e, 0xda, 0xaf, 0x99, 0xa3, 0xfd, 0x50, 0x9b, 0xd1, 0xce, 0x19, 0xe6, 0x3f, 0x28, + 0xa8, 0xfe, 0x96, 0xfd, 0x20, 0xea, 0x82, 0x4b, 0xbc, 0x77, 0xb7, 0x97, 0x0b, 0x30, 0xe4, 0x34, + 0x9b, 0x12, 0x20, 0xed, 0x17, 0x59, 0x2a, 0x85, 0xb8, 0x18, 0xeb, 0x38, 0xca, 
0x0b, 0xa7, 0x98, + 0xeb, 0x85, 0x53, 0x03, 0x88, 0x9c, 0x60, 0x83, 0x44, 0xb4, 0x4c, 0x98, 0x5b, 0xe7, 0x9f, 0x37, + 0xad, 0xc8, 0xad, 0xcf, 0xb8, 0x5e, 0x14, 0x46, 0xc1, 0xcc, 0xb2, 0x17, 0x5d, 0x0b, 0xf8, 0x33, + 0x55, 0x0b, 0x2a, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x8c, 0x3f, 0xc2, 0xda, 0xe8, 0x35, 0x0d, 0x61, + 0x56, 0x45, 0x39, 0x56, 0x18, 0xf6, 0x8b, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb0, 0x80, 0x7a, 0x7f, + 0x3e, 0xac, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf4, 0xb0, 0x7d, 0xed, 0x0f, 0x7b, 0xda, 0xb0, 0xee, + 0xcf, 0x1a, 0xc7, 0xf6, 0x43, 0x1f, 0x4f, 0x19, 0x37, 0x3d, 0xd3, 0xe1, 0xd6, 0x38, 0x80, 0x39, + 0x13, 0xcb, 0xab, 0xc6, 0xb2, 0x4e, 0x2d, 0x97, 0xc5, 0xbe, 0xd0, 0xf2, 0xaa, 0x09, 0x00, 0x8e, + 0x71, 0x28, 0xc3, 0xa6, 0xfe, 0x84, 0x53, 0x28, 0x0e, 0xbf, 0xad, 0xb0, 0x43, 0xac, 0x61, 0xa0, + 0xf3, 0x42, 0x68, 0xc1, 0x75, 0x0f, 0x0f, 0x25, 0x84, 0x16, 0x72, 0xb8, 0x34, 0x49, 0xd3, 0x05, + 0x18, 0x22, 0x77, 0x22, 0x12, 0x78, 0x4e, 0x9d, 0xb6, 0xd0, 0x1b, 0x47, 0x8c, 0x5d, 0x8c, 0x8b, + 0xb1, 0x8e, 0x83, 0xd6, 0x60, 0x2c, 0xe4, 0xb2, 0x3c, 0x95, 0xf4, 0x81, 0xcb, 0x44, 0x9f, 0x54, + 0xde, 0xf0, 0x26, 0x78, 0x9f, 0x15, 0xf1, 0xd3, 0x49, 0xc6, 0x08, 0x49, 0x92, 0x40, 0xaf, 0xc2, + 0x68, 0xdd, 0x77, 0x6a, 0x73, 0x4e, 0xdd, 0xf1, 0xaa, 0x6c, 0x7c, 0x06, 0xcc, 0xec, 0xfc, 0x57, + 0x0d, 0x28, 0x4e, 0x60, 0x53, 0x06, 0x51, 0x2f, 0x11, 0x89, 0x4a, 0x1c, 0x6f, 0x83, 0x84, 0x53, + 0x83, 0xec, 0xab, 0x18, 0x83, 0x78, 0x35, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0x17, 0x61, 0x58, 0x7e, + 0xbe, 0x16, 0x52, 0x27, 0x76, 0x68, 0xd2, 0x60, 0xd8, 0xc0, 0x44, 0x21, 0x1c, 0x95, 0xff, 0xd7, + 0x02, 0x67, 0x7d, 0xdd, 0xad, 0x8a, 0x38, 0x13, 0xdc, 0xf9, 0xfb, 0x23, 0xd2, 0xd3, 0x74, 0x31, + 0x0b, 0x69, 0x7f, 0xb7, 0x74, 0x52, 0x8c, 0x5a, 0x26, 0x1c, 0x67, 0xd3, 0x46, 0x2b, 0x30, 0xc9, + 0x6d, 0x60, 0xe6, 0x37, 0x49, 0x75, 0x4b, 0x6e, 0x38, 0xc6, 0x35, 0x6a, 0x8e, 0x3f, 0x97, 0xd3, + 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x0b, 0xa6, 0x9a, 0xad, 0x5b, 0x75, 0x37, 0xdc, 0x5c, 0xf5, 0x23, + 0x66, 0x42, 0x36, 0x5b, 0xab, 0x05, 0x24, 0xe4, 0xbe, 0xc1, 0xec, 0xea, 0x95, 0x61, 0x90, 0xca, + 0x39, 0x78, 0x38, 0x97, 0x02, 0xba, 0x0b, 0x47, 0x13, 0x0b, 0x41, 0xc4, 0x33, 0x19, 0xcd, 0x4f, + 0xf9, 0x54, 0xc9, 0xaa, 0x20, 0x42, 0x03, 0x65, 0x81, 0x70, 0x76, 0x13, 0xe8, 0x25, 0x00, 0xb7, + 0xb9, 0xe4, 0x34, 0xdc, 0x3a, 0x7d, 0x8e, 0x4e, 0xb2, 0x35, 0x42, 0x9f, 0x26, 0xb0, 0x5c, 0x96, + 0xa5, 0xf4, 0x6c, 0x16, 0xff, 0x76, 0xb0, 0x86, 0x8d, 0xae, 0xc2, 0xa8, 0xf8, 0xb7, 0x23, 0xa6, + 0x74, 0x42, 0x65, 0x07, 0x1d, 0x95, 0x35, 0xd4, 0x3c, 0x26, 0x4a, 0x70, 0xa2, 0x2e, 0xda, 0x80, + 0x53, 0x32, 0x35, 0xa9, 0xbe, 0x3e, 0xe5, 0x1c, 0x84, 0x2c, 0xcf, 0xd2, 0x00, 0xf7, 0x29, 0x9a, + 0x6d, 0x87, 0x88, 0xdb, 0xd3, 0xa1, 0xf7, 0xba, 0xbe, 0xcc, 0xb9, 0xc7, 0xf8, 0xd1, 0x38, 0x12, + 0xe6, 0xd5, 0x24, 0x10, 0xa7, 0xf1, 0x91, 0x0f, 0x47, 0x5d, 0x2f, 0x6b, 0x55, 0x1f, 0x63, 0x84, + 0x3e, 0xcc, 0x9d, 0xe5, 0xdb, 0xaf, 0xe8, 0x4c, 0x38, 0xce, 0xa6, 0x8b, 0x96, 0x61, 0x32, 0xe2, + 0x05, 0x0b, 0x6e, 0xc8, 0xd3, 0xb8, 0xd0, 0x67, 0xdf, 0x71, 0xd6, 0xdc, 0x71, 0xba, 0x9a, 0xd7, + 0xd2, 0x60, 0x9c, 0x55, 0xe7, 0x9d, 0x19, 0x80, 0x7e, 0xdd, 0xa2, 0xb5, 0x35, 0x46, 0x1f, 0x7d, + 0x0a, 0x86, 0xf5, 0xf1, 0x11, 0x4c, 0xcb, 0xd9, 0x6c, 0x3e, 0x58, 0x3b, 0x5e, 0xf8, 0x33, 0x41, + 0x1d, 0x21, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x35, 0x23, 0xc8, 0xc5, 0xf9, 0xee, 0x98, 0xa2, 0xee, + 0xed, 0x1f, 0x09, 0x64, 0xef, 0x1c, 0x74, 0x15, 0x06, 0xaa, 0x75, 0x97, 0x78, 0xd1, 0x72, 0xb9, + 0x5d, 0x80, 0xd2, 0x79, 0x81, 0x23, 0xb6, 0xa2, 0xc8, 0xbe, 0xc4, 0xcb, 0xb0, 0xa2, 0x60, 0x5f, + 0x84, 
0xa1, 0x4a, 0x9d, 0x90, 0x26, 0xf7, 0xe3, 0x42, 0x4f, 0xb0, 0x87, 0x09, 0x63, 0x2d, 0x2d, + 0xc6, 0x5a, 0xea, 0x6f, 0x0e, 0xc6, 0x54, 0x4a, 0xb8, 0xfd, 0xdb, 0x05, 0x28, 0x75, 0x48, 0x02, + 0x96, 0xd0, 0xb7, 0x59, 0x5d, 0xe9, 0xdb, 0x66, 0x61, 0x2c, 0xfe, 0xa7, 0x8b, 0xf2, 0x94, 0x31, + 0xf4, 0x0d, 0x13, 0x8c, 0x93, 0xf8, 0x5d, 0xfb, 0xb5, 0xe8, 0x2a, 0xbb, 0x9e, 0x8e, 0x9e, 0x59, + 0x86, 0xaa, 0xbe, 0xb7, 0xfb, 0xb7, 0x77, 0xae, 0xda, 0xd5, 0xfe, 0x5a, 0x01, 0x8e, 0xaa, 0x21, + 0x7c, 0xef, 0x0e, 0xdc, 0xf5, 0xf4, 0xc0, 0xdd, 0x07, 0xa5, 0xb5, 0x7d, 0x0d, 0xfa, 0x78, 0xd4, + 0xd4, 0x2e, 0x78, 0xfe, 0x47, 0xcc, 0xe0, 0xf4, 0x8a, 0xcd, 0x34, 0x02, 0xd4, 0xff, 0x80, 0x05, + 0x63, 0x09, 0x07, 0x49, 0x84, 0x35, 0x2f, 0xfa, 0x7b, 0xe1, 0xcb, 0xb3, 0x38, 0xfe, 0x33, 0xd0, + 0xb3, 0xe9, 0x2b, 0x23, 0x65, 0x85, 0x71, 0xd9, 0x0f, 0x23, 0xcc, 0x20, 0xf6, 0x1f, 0x59, 0xd0, + 0xbb, 0xe6, 0xb8, 0x5e, 0x24, 0xb5, 0x1f, 0x56, 0x8e, 0xf6, 0xa3, 0x9b, 0xef, 0x42, 0x2f, 0x40, + 0x1f, 0x59, 0x5f, 0x27, 0xd5, 0x48, 0xcc, 0xaa, 0x8c, 0xa6, 0xd1, 0xb7, 0xc8, 0x4a, 0x29, 0x13, + 0xca, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0x37, 0x61, 0x30, 0x72, 0x1b, 0x64, 0xb6, 0x56, 0x13, + 0x36, 0x01, 0xf7, 0x10, 0x02, 0x66, 0x4d, 0x12, 0xc0, 0x31, 0x2d, 0xfb, 0x8b, 0x05, 0x80, 0x38, + 0x14, 0x5c, 0xa7, 0x4f, 0x9c, 0x4b, 0x69, 0x8b, 0xcf, 0x66, 0x68, 0x8b, 0x51, 0x4c, 0x30, 0x43, + 0x55, 0xac, 0x86, 0xa9, 0xd8, 0xd5, 0x30, 0xf5, 0x1c, 0x64, 0x98, 0xe6, 0x61, 0x22, 0x0e, 0x65, + 0x67, 0x46, 0xf2, 0x64, 0xf7, 0xf7, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0x36, 0x81, 0x33, 0x2a, 0xa2, + 0x97, 0xb8, 0x0b, 0x99, 0x2b, 0x81, 0xae, 0x7d, 0xef, 0x30, 0x4e, 0xb1, 0x3a, 0xbc, 0x90, 0xab, + 0x0e, 0xff, 0x29, 0x0b, 0x8e, 0x24, 0xdb, 0x61, 0x7e, 0xf7, 0x9f, 0xb7, 0xe0, 0x68, 0x9c, 0x03, + 0x27, 0x6d, 0x82, 0xf0, 0x7c, 0xdb, 0x28, 0x65, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x64, 0x91, + 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0xab, 0x07, 0xa6, 0xf2, 0xc2, 0x9b, 0x31, 0x4f, 0x23, 0xe7, 0x4e, + 0x65, 0x8b, 0xdc, 0x16, 0xfe, 0x1c, 0xb1, 0xa7, 0x11, 0x2f, 0xc6, 0x12, 0x9e, 0x4c, 0x7b, 0x54, + 0xe8, 0x32, 0xed, 0xd1, 0x26, 0x4c, 0xdc, 0xde, 0x24, 0xde, 0x75, 0x2f, 0x74, 0x22, 0x37, 0x5c, + 0x77, 0x99, 0x02, 0x9d, 0xaf, 0x1b, 0x99, 0xba, 0x7f, 0xe2, 0x66, 0x12, 0x61, 0x7f, 0xb7, 0x74, + 0xca, 0x28, 0x88, 0xbb, 0xcc, 0x0f, 0x12, 0x9c, 0x26, 0x9a, 0xce, 0x1a, 0xd5, 0xf3, 0x80, 0xb3, + 0x46, 0x35, 0x5c, 0x61, 0x76, 0x23, 0xdd, 0x48, 0xd8, 0xb3, 0x75, 0x45, 0x95, 0x62, 0x0d, 0x03, + 0x7d, 0x02, 0x90, 0x9e, 0xf6, 0xcf, 0x88, 0x2e, 0xfb, 0xcc, 0xde, 0x6e, 0x09, 0xad, 0xa6, 0xa0, + 0xfb, 0xbb, 0xa5, 0x49, 0x5a, 0xba, 0xec, 0xd1, 0xe7, 0x6f, 0x1c, 0x92, 0x2f, 0x83, 0x10, 0xba, + 0x09, 0xe3, 0xb4, 0x94, 0xed, 0x28, 0x19, 0xba, 0x96, 0x3f, 0x59, 0x9f, 0xda, 0xdb, 0x2d, 0x8d, + 0xaf, 0x26, 0x60, 0x79, 0xa4, 0x53, 0x44, 0x32, 0x92, 0x47, 0x0d, 0x74, 0x9b, 0x3c, 0xca, 0xfe, + 0xbc, 0x05, 0x27, 0xe8, 0x05, 0x57, 0xbb, 0x9a, 0xa3, 0x45, 0x77, 0x9a, 0x2e, 0xd7, 0xd3, 0x88, + 0xab, 0x86, 0xc9, 0xea, 0xca, 0xcb, 0x5c, 0x4b, 0xa3, 0xa0, 0xf4, 0x84, 0xdf, 0x72, 0xbd, 0x5a, + 0xf2, 0x84, 0xbf, 0xe2, 0x7a, 0x35, 0xcc, 0x20, 0xea, 0xca, 0x2a, 0xe6, 0x46, 0xa9, 0xff, 0x0a, + 0xdd, 0xab, 0xb4, 0x2f, 0xdf, 0xd2, 0x6e, 0xa0, 0xa7, 0x74, 0x9d, 0xaa, 0x30, 0x9f, 0xcc, 0xd5, + 0xa7, 0x7e, 0xce, 0x02, 0xe1, 0xfd, 0xde, 0xc5, 0x9d, 0xfc, 0x26, 0x0c, 0x6f, 0xa7, 0x53, 0xa2, + 0x9e, 0xc9, 0x0f, 0x07, 0x20, 0x12, 0xa1, 0x2a, 0x16, 0xdd, 0x48, 0x7f, 0x6a, 0xd0, 0xb2, 0x6b, + 0x20, 0xa0, 0x0b, 0x84, 0x69, 0x35, 0x3a, 0xf7, 0xe6, 0x59, 0x80, 0x1a, 0xc3, 0x65, 0x79, 0xd2, + 0x0b, 0x26, 0xc7, 0xb5, 0xa0, 
0x20, 0x58, 0xc3, 0xb2, 0x7f, 0xae, 0x08, 0x43, 0x32, 0x05, 0x67, + 0xcb, 0xeb, 0x46, 0xf6, 0x78, 0xa0, 0x9c, 0xfc, 0xe8, 0x2d, 0x98, 0x08, 0x48, 0xb5, 0x15, 0x84, + 0xee, 0x36, 0x91, 0x60, 0xb1, 0x49, 0x66, 0x78, 0x92, 0x84, 0x04, 0x70, 0x9f, 0x85, 0xc8, 0x4a, + 0x14, 0x32, 0xa5, 0x71, 0x9a, 0x10, 0x3a, 0x0f, 0x83, 0x4c, 0xf4, 0x5e, 0x8e, 0x05, 0xc2, 0x4a, + 0xf0, 0xb5, 0x22, 0x01, 0x38, 0xc6, 0x61, 0x8f, 0x83, 0xd6, 0x2d, 0x86, 0x9e, 0xf0, 0x04, 0xaf, + 0xf0, 0x62, 0x2c, 0xe1, 0xe8, 0xa3, 0x30, 0xce, 0xeb, 0x05, 0x7e, 0xd3, 0xd9, 0xe0, 0x2a, 0xc1, + 0x5e, 0x15, 0x5e, 0x67, 0x7c, 0x25, 0x01, 0xdb, 0xdf, 0x2d, 0x1d, 0x49, 0x96, 0xb1, 0x6e, 0xa7, + 0xa8, 0x30, 0xcb, 0x3f, 0xde, 0x08, 0xbd, 0x33, 0x52, 0x06, 0x83, 0x31, 0x08, 0xeb, 0x78, 0xf6, + 0x5f, 0x59, 0x30, 0xa1, 0x4d, 0x55, 0xd7, 0x79, 0x2a, 0x8c, 0x41, 0x2a, 0x74, 0x31, 0x48, 0x07, + 0x8b, 0xf6, 0x90, 0x39, 0xc3, 0x3d, 0xf7, 0x69, 0x86, 0xed, 0x4f, 0x01, 0x4a, 0xe7, 0x77, 0x45, + 0xaf, 0x73, 0x43, 0x7e, 0x37, 0x20, 0xb5, 0x76, 0x0a, 0x7f, 0x3d, 0x72, 0x8e, 0xf4, 0x5c, 0xe5, + 0xb5, 0xb0, 0xaa, 0x6f, 0xff, 0x60, 0x0f, 0x8c, 0x27, 0x63, 0x75, 0xa0, 0xcb, 0xd0, 0xc7, 0xb9, + 0x74, 0x41, 0xbe, 0x8d, 0x3d, 0x99, 0x16, 0xe1, 0x83, 0xf1, 0x2b, 0x82, 0xd1, 0x17, 0xf5, 0xd1, + 0x5b, 0x30, 0x54, 0xf3, 0x6f, 0x7b, 0xb7, 0x9d, 0xa0, 0x36, 0x5b, 0x5e, 0x16, 0x27, 0x44, 0xa6, + 0x00, 0x6a, 0x21, 0x46, 0xd3, 0xa3, 0x86, 0x30, 0xdb, 0x89, 0x18, 0x84, 0x75, 0x72, 0x68, 0x8d, + 0xa5, 0x2c, 0x5a, 0x77, 0x37, 0x56, 0x9c, 0x66, 0x3b, 0xaf, 0xae, 0x79, 0x89, 0xa4, 0x51, 0x1e, + 0x11, 0x79, 0x8d, 0x38, 0x00, 0xc7, 0x84, 0xd0, 0x67, 0x60, 0x32, 0xcc, 0x51, 0x89, 0xe5, 0xa5, + 0xfb, 0x6e, 0xa7, 0x25, 0xe2, 0xc2, 0x94, 0x2c, 0xe5, 0x59, 0x56, 0x33, 0xe8, 0x0e, 0x20, 0x21, + 0x7a, 0x5e, 0x0b, 0x5a, 0x61, 0x34, 0xd7, 0xf2, 0x6a, 0x75, 0x99, 0xd2, 0xe8, 0x83, 0xd9, 0x72, + 0x82, 0x24, 0xb6, 0xd6, 0x36, 0x8b, 0xdd, 0x9b, 0xc6, 0xc0, 0x19, 0x6d, 0xd8, 0x9f, 0xeb, 0x81, + 0x69, 0x99, 0x50, 0x39, 0xc3, 0x7b, 0xe5, 0xb3, 0x56, 0xc2, 0x7d, 0xe5, 0xa5, 0xfc, 0x83, 0xfe, + 0x81, 0x39, 0xb1, 0x7c, 0x21, 0xed, 0xc4, 0xf2, 0xca, 0x01, 0xbb, 0x71, 0xdf, 0x5c, 0x59, 0xde, + 0xb3, 0xfe, 0x27, 0x7b, 0x47, 0xc0, 0xb8, 0x9a, 0x11, 0xe6, 0x81, 0xd1, 0xcb, 0x52, 0x75, 0x94, + 0xf3, 0xfc, 0xbf, 0x2c, 0x70, 0x8c, 0xcb, 0x7e, 0x58, 0x86, 0x4f, 0x67, 0xe7, 0xac, 0xa2, 0x43, + 0x69, 0x92, 0x46, 0x33, 0xda, 0x59, 0x70, 0x03, 0xd1, 0xe3, 0x4c, 0x9a, 0x8b, 0x02, 0x27, 0x4d, + 0x53, 0x42, 0xb0, 0xa2, 0x83, 0xb6, 0x61, 0x62, 0x83, 0x45, 0x7c, 0xd2, 0x72, 0x1b, 0x8b, 0x73, + 0x21, 0x73, 0xdf, 0x5e, 0x9a, 0x5f, 0xcc, 0x4f, 0x84, 0xcc, 0x1f, 0x7f, 0x29, 0x14, 0x9c, 0x6e, + 0x82, 0x6e, 0x8d, 0x23, 0xce, 0xed, 0x70, 0xb1, 0xee, 0x84, 0x91, 0x5b, 0x9d, 0xab, 0xfb, 0xd5, + 0xad, 0x4a, 0xe4, 0x07, 0x32, 0x01, 0x62, 0xe6, 0xdb, 0x6b, 0xf6, 0x66, 0x25, 0x85, 0x6f, 0x34, + 0x3f, 0xb5, 0xb7, 0x5b, 0x3a, 0x92, 0x85, 0x85, 0x33, 0xdb, 0x42, 0xab, 0xd0, 0xbf, 0xe1, 0x46, + 0x98, 0x34, 0x7d, 0x71, 0x5a, 0x64, 0x1e, 0x85, 0x97, 0x38, 0x8a, 0xd1, 0x12, 0x8b, 0x48, 0x25, + 0x00, 0x58, 0x12, 0x41, 0xaf, 0xab, 0x4b, 0xa0, 0x2f, 0x5f, 0x00, 0x9b, 0xb6, 0xbd, 0xcb, 0xbc, + 0x06, 0x5e, 0x85, 0xa2, 0xb7, 0x1e, 0xb6, 0x8b, 0xc5, 0xb3, 0xba, 0x64, 0xc8, 0xcf, 0xe6, 0xfa, + 0xe9, 0xd3, 0x78, 0x75, 0xa9, 0x82, 0x69, 0x45, 0xe6, 0xf6, 0x1a, 0x56, 0x43, 0x57, 0x24, 0x5c, + 0xca, 0xf4, 0x02, 0x5e, 0xae, 0xcc, 0x57, 0x96, 0x0d, 0x1a, 0x2c, 0xaa, 0x21, 0x2b, 0xc6, 0xbc, + 0x3a, 0xba, 0x01, 0x83, 0x1b, 0xfc, 0xe0, 0x5b, 0x0f, 0x45, 0x52, 0xf5, 0xcc, 0xcb, 0xe8, 0x92, + 0x44, 0x32, 0xe8, 0xb1, 0x2b, 0x43, 0x81, 0x70, 0x4c, 
0x0a, 0x7d, 0xce, 0x82, 0xa3, 0xc9, 0xac, + 0xf4, 0xcc, 0x59, 0x4d, 0x98, 0xa9, 0x65, 0x3a, 0x00, 0x94, 0xb3, 0x2a, 0x18, 0x0d, 0x32, 0xf5, + 0x4b, 0x26, 0x1a, 0xce, 0x6e, 0x8e, 0x0e, 0x74, 0x70, 0xab, 0xd6, 0x2e, 0x47, 0x4f, 0x22, 0x30, + 0x11, 0x1f, 0x68, 0x3c, 0xb7, 0x80, 0x69, 0x45, 0xb4, 0x06, 0xb0, 0x5e, 0x27, 0x22, 0xe2, 0xa3, + 0x30, 0x8a, 0xca, 0xbc, 0xfd, 0x97, 0x14, 0x96, 0xa0, 0xc3, 0x5e, 0xa2, 0x71, 0x29, 0xd6, 0xe8, + 0xd0, 0xa5, 0x54, 0x75, 0xbd, 0x1a, 0x09, 0x98, 0x72, 0x2b, 0x67, 0x29, 0xcd, 0x33, 0x8c, 0xf4, + 0x52, 0xe2, 0xe5, 0x58, 0x50, 0x60, 0xb4, 0x48, 0x73, 0x73, 0x3d, 0x6c, 0x97, 0x72, 0x62, 0x9e, + 0x34, 0x37, 0x13, 0x0b, 0x8a, 0xd3, 0x62, 0xe5, 0x58, 0x50, 0xa0, 0x5b, 0x66, 0x9d, 0x6e, 0x20, + 0x12, 0x4c, 0x8d, 0xe5, 0x6f, 0x99, 0x25, 0x8e, 0x92, 0xde, 0x32, 0x02, 0x80, 0x25, 0x11, 0xf4, + 0x49, 0x93, 0xdb, 0x19, 0x67, 0x34, 0x9f, 0xea, 0xc0, 0xed, 0x18, 0x74, 0xdb, 0xf3, 0x3b, 0x2f, + 0x41, 0x61, 0xbd, 0xca, 0x94, 0x62, 0x39, 0x3a, 0x83, 0xa5, 0x79, 0x83, 0x1a, 0x0b, 0xe1, 0xbe, + 0x34, 0x8f, 0x0b, 0xeb, 0x55, 0xba, 0xf4, 0x9d, 0xbb, 0xad, 0x80, 0x2c, 0xb9, 0x75, 0x22, 0xd2, + 0x4f, 0x64, 0x2e, 0xfd, 0x59, 0x89, 0x94, 0x5e, 0xfa, 0x0a, 0x84, 0x63, 0x52, 0x94, 0x6e, 0xcc, + 0x83, 0x4d, 0xe6, 0xd3, 0x55, 0xac, 0x56, 0x9a, 0x6e, 0x26, 0x17, 0xb6, 0x05, 0x23, 0xdb, 0x61, + 0x73, 0x93, 0xc8, 0x53, 0x91, 0xa9, 0xeb, 0x72, 0x22, 0x55, 0xdc, 0x10, 0x88, 0x6e, 0x10, 0xb5, + 0x9c, 0x7a, 0xea, 0x20, 0x67, 0xa2, 0x95, 0x1b, 0x3a, 0x31, 0x6c, 0xd2, 0xa6, 0x0b, 0xe1, 0x6d, + 0x1e, 0x4e, 0x8e, 0x29, 0xee, 0x72, 0x16, 0x42, 0x46, 0xc4, 0x39, 0xbe, 0x10, 0x04, 0x00, 0x4b, + 0x22, 0x6a, 0xb0, 0xd9, 0x05, 0x74, 0xac, 0xc3, 0x60, 0xa7, 0xfa, 0x1b, 0x0f, 0x36, 0xbb, 0x70, + 0x62, 0x52, 0xec, 0xa2, 0x69, 0x66, 0x24, 0xf0, 0x67, 0x6a, 0xbb, 0x9c, 0x8b, 0xa6, 0x53, 0xc2, + 0x7f, 0x7e, 0xd1, 0x64, 0x61, 0xe1, 0xcc, 0xb6, 0xe8, 0xc7, 0x35, 0x65, 0x64, 0x40, 0x91, 0x22, + 0xe3, 0x89, 0x9c, 0xc0, 0x9a, 0xe9, 0xf0, 0x81, 0xfc, 0xe3, 0x14, 0x08, 0xc7, 0xa4, 0x50, 0x0d, + 0x46, 0x9b, 0x46, 0xc4, 0x59, 0x96, 0xea, 0x23, 0x87, 0x2f, 0xc8, 0x8a, 0x4d, 0xcb, 0x25, 0x44, + 0x26, 0x04, 0x27, 0x68, 0x32, 0xcb, 0x3d, 0xee, 0xea, 0xc7, 0x32, 0x81, 0xe4, 0x4c, 0x75, 0x86, + 0x37, 0x20, 0x9f, 0x6a, 0x01, 0xc0, 0x92, 0x08, 0x1d, 0x0d, 0xe1, 0xa0, 0xe6, 0x87, 0x2c, 0xa1, + 0x4e, 0x9e, 0x82, 0x3d, 0x4b, 0x4d, 0x24, 0xc3, 0xac, 0x0b, 0x10, 0x8e, 0x49, 0xd1, 0x93, 0x9c, + 0x5e, 0x78, 0x27, 0xf3, 0x4f, 0xf2, 0xe4, 0x75, 0xc7, 0x4e, 0x72, 0x7a, 0xd9, 0x15, 0xc5, 0x55, + 0xa7, 0xa2, 0x82, 0xb3, 0x64, 0x20, 0x39, 0xfd, 0x52, 0x61, 0xc5, 0xd3, 0xfd, 0x52, 0x20, 0x1c, + 0x93, 0x62, 0x57, 0x31, 0x0b, 0x4d, 0x77, 0xba, 0xcd, 0x55, 0x4c, 0x11, 0x32, 0xae, 0x62, 0x2d, + 0x74, 0x9d, 0xfd, 0x83, 0x05, 0x38, 0xdd, 0x7e, 0xdf, 0xc6, 0x3a, 0xb4, 0x72, 0x6c, 0xb3, 0x94, + 0xd0, 0xa1, 0x71, 0x89, 0x4e, 0x8c, 0xd5, 0x75, 0xc0, 0xe1, 0x4b, 0x30, 0xa1, 0xdc, 0x11, 0xeb, + 0x6e, 0x75, 0x47, 0x4b, 0xce, 0xa9, 0x42, 0xf3, 0x54, 0x92, 0x08, 0x38, 0x5d, 0x07, 0xcd, 0xc2, + 0x98, 0x51, 0xb8, 0xbc, 0x20, 0x9e, 0xff, 0x71, 0x1a, 0x0b, 0x13, 0x8c, 0x93, 0xf8, 0xf6, 0xcf, + 0x5b, 0x70, 0x3c, 0x27, 0x0f, 0x7b, 0xd7, 0xf1, 0x74, 0xd7, 0x61, 0xac, 0x69, 0x56, 0xed, 0x10, + 0x02, 0xdc, 0xc8, 0xf6, 0xae, 0xfa, 0x9a, 0x00, 0xe0, 0x24, 0x51, 0xfb, 0x67, 0x0b, 0x70, 0xaa, + 0xad, 0x7d, 0x3d, 0xc2, 0x70, 0x6c, 0xa3, 0x11, 0x3a, 0xf3, 0x01, 0xa9, 0x11, 0x2f, 0x72, 0x9d, + 0x7a, 0xa5, 0x49, 0xaa, 0x9a, 0x16, 0x94, 0x19, 0xaa, 0x5f, 0x5a, 0xa9, 0xcc, 0xa6, 0x31, 0x70, + 0x4e, 0x4d, 0xb4, 0x04, 0x28, 0x0d, 0x11, 0x33, 0xcc, 0x9e, 0xb8, 0x69, 0x7a, 
0x38, 0xa3, 0x06, + 0x7a, 0x11, 0x46, 0x94, 0xdd, 0xbe, 0x36, 0xe3, 0xec, 0x82, 0xc0, 0x3a, 0x00, 0x9b, 0x78, 0xe8, + 0x02, 0xcf, 0x6f, 0x24, 0x32, 0x61, 0x09, 0x95, 0xe9, 0x98, 0x4c, 0x5e, 0x24, 0x8a, 0xb1, 0x8e, + 0x33, 0x77, 0xf1, 0x77, 0xbe, 0x79, 0xfa, 0x03, 0xbf, 0xff, 0xcd, 0xd3, 0x1f, 0xf8, 0xc3, 0x6f, + 0x9e, 0xfe, 0xc0, 0xf7, 0xec, 0x9d, 0xb6, 0x7e, 0x67, 0xef, 0xb4, 0xf5, 0xfb, 0x7b, 0xa7, 0xad, + 0x3f, 0xdc, 0x3b, 0x6d, 0xfd, 0xd7, 0xbd, 0xd3, 0xd6, 0x17, 0xff, 0xe4, 0xf4, 0x07, 0xde, 0x44, + 0x71, 0x84, 0xea, 0xf3, 0x74, 0x76, 0xce, 0x6f, 0x5f, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x67, 0xd5, 0x38, 0x2d, 0xc3, 0x23, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -8494,43 +8661,6 @@ func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ClaimSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClaimSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClaimSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ResourceClaimTemplateName != nil { - i -= len(*m.ResourceClaimTemplateName) - copy(dAtA[i:], *m.ResourceClaimTemplateName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimTemplateName))) - i-- - dAtA[i] = 0x12 - } - if m.ResourceClaimName != nil { - i -= len(*m.ResourceClaimName) - copy(dAtA[i:], *m.ResourceClaimName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9753,6 +9883,32 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.AllocatedResourcesStatus) > 0 { + for iNdEx := len(m.AllocatedResourcesStatus) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllocatedResourcesStatus[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } if len(m.VolumeMounts) > 0 { for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { { @@ -9872,6 +10028,41 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerUser) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerUser) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Linux != nil { + { + size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *DaemonEndpoint) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11972,6 +12163,39 @@ func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ImageVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.PullPolicy) + copy(dAtA[i:], m.PullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PullPolicy))) + i-- + dAtA[i] = 0x12 + i -= len(m.Reference) + copy(dAtA[i:], m.Reference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reference))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *KeyToPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -12428,6 +12652,42 @@ func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *LinuxContainerUser) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxContainerUser) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SupplementalGroups) > 0 { + for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx])) + i-- + dAtA[i] = 0x18 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.GID)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.UID)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13260,6 +13520,39 @@ func (m *NodeDaemonEndpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *NodeFeatures) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeFeatures) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeFeatures) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SupplementalGroupsPolicy != nil { + i-- + if *m.SupplementalGroupsPolicy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *NodeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13395,6 +13688,16 @@ func (m *NodeRuntimeHandlerFeatures) MarshalToSizedBuffer(dAtA []byte) (int, err _ = i var l int _ = l + if m.UserNamespaces != nil { + i-- + if *m.UserNamespaces { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } if m.RecursiveReadOnlyMounts != nil { i-- if *m.RecursiveReadOnlyMounts { @@ -13639,6 +13942,18 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = 
l + if m.Features != nil { + { + size, err := m.Features.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } if len(m.RuntimeHandlers) > 0 { for iNdEx := len(m.RuntimeHandlers) - 1; iNdEx >= 0; iNdEx-- { { @@ -15902,16 +16217,20 @@ func (m *PodResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - { - size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.ResourceClaimTemplateName != nil { + i -= len(*m.ResourceClaimTemplateName) + copy(dAtA[i:], *m.ResourceClaimTemplateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimTemplateName))) + i-- + dAtA[i] = 0x22 + } + if m.ResourceClaimName != nil { + i -= len(*m.ResourceClaimName) + copy(dAtA[i:], *m.ResourceClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName))) + i-- + dAtA[i] = 0x1a } - i-- - dAtA[i] = 0x12 i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -16003,6 +16322,13 @@ func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SupplementalGroupsPolicy != nil { + i -= len(*m.SupplementalGroupsPolicy) + copy(dAtA[i:], *m.SupplementalGroupsPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SupplementalGroupsPolicy))) + i-- + dAtA[i] = 0x62 + } if m.AppArmorProfile != nil { { size, err := m.AppArmorProfile.MarshalToSizedBuffer(dAtA[:i]) @@ -17878,6 +18204,11 @@ func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0x12 i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -17929,6 +18260,39 @@ func (m *ResourceFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ResourceHealth) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceHealth) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Health) + copy(dAtA[i:], m.Health) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Health))) + i-- + dAtA[i] = 0x12 + i -= len(m.ResourceID) + copy(dAtA[i:], m.ResourceID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -18278,6 +18642,48 @@ func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ResourceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + 
i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -20738,6 +21144,20 @@ func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } if m.Ephemeral != nil { { size, err := m.Ephemeral.MarshalToSizedBuffer(dAtA[:i]) @@ -21596,23 +22016,6 @@ func (m *CinderVolumeSource) Size() (n int) { return n } -func (m *ClaimSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ResourceClaimName != nil { - l = len(*m.ResourceClaimName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceClaimTemplateName != nil { - l = len(*m.ResourceClaimTemplateName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *ClientIPConfig) Size() (n int) { if m == nil { return 0 @@ -22097,6 +22500,29 @@ func (m *ContainerStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AllocatedResourcesStatus) > 0 { + for _, e := range m.AllocatedResourcesStatus { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ContainerUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Linux != nil { + l = m.Linux.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22881,6 +23307,19 @@ func (m *ISCSIVolumeSource) Size() (n int) { return n } +func (m *ImageVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Reference) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *KeyToPath) Size() (n int) { if m == nil { return 0 @@ -23040,6 +23479,22 @@ func (m *LimitRangeSpec) Size() (n int) { return n } +func (m *LinuxContainerUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.UID)) + n += 1 + sovGenerated(uint64(m.GID)) + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + func (m *List) Size() (n int) { if m == nil { return 0 @@ -23346,6 +23801,18 @@ func (m *NodeDaemonEndpoints) Size() (n int) { return n } +func (m *NodeFeatures) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SupplementalGroupsPolicy != nil { + n += 2 + } + return n +} + func (m *NodeList) Size() (n int) { if m == nil { return 0 @@ -23398,6 +23865,9 @@ func (m *NodeRuntimeHandlerFeatures) Size() (n int) { if m.RecursiveReadOnlyMounts != nil { n += 2 } + if m.UserNamespaces != nil { + n += 2 + } return n } @@ -23558,6 +24028,10 @@ func (m *NodeStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.Features != nil { + l = m.Features.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } return n } @@ -24321,8 +24795,14 @@ func (m *PodResourceClaim) Size() (n int) { _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ResourceClaimName != nil { + l = len(*m.ResourceClaimName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceClaimTemplateName != nil { + l = len(*m.ResourceClaimTemplateName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24401,6 +24881,10 @@ func (m *PodSecurityContext) Size() (n int) { l = m.AppArmorProfile.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.SupplementalGroupsPolicy != nil { + l = len(*m.SupplementalGroupsPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -25040,6 +25524,8 @@ func (m *ResourceClaim) Size() (n int) { _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -25058,6 +25544,19 @@ func (m *ResourceFieldSelector) Size() (n int) { return n } +func (m *ResourceHealth) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Health) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ResourceQuota) Size() (n int) { if m == nil { return 0 @@ -25178,6 +25677,23 @@ func (m *ResourceRequirements) Size() (n int) { return n } +func (m *ResourceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *SELinuxOptions) Size() (n int) { if m == nil { return 0 @@ -26221,6 +26737,10 @@ func (m *VolumeSource) Size() (n int) { l = m.Ephemeral.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.Image != nil { + l = m.Image.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -26516,17 +27036,6 @@ func (this *CinderVolumeSource) String() string { }, "") return s } -func (this *ClaimSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClaimSource{`, - `ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`, - `ResourceClaimTemplateName:` + valueToStringGenerated(this.ResourceClaimTemplateName) + `,`, - `}`, - }, "") - return s -} func (this *ClientIPConfig) String() string { if this == nil { return "nil" @@ -26874,6 +27383,11 @@ func (this *ContainerStatus) String() string { repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMountStatus", "VolumeMountStatus", 1), `&`, ``, 1) + "," } repeatedStringForVolumeMounts += "}" + repeatedStringForAllocatedResourcesStatus := "[]ResourceStatus{" + for _, f := range this.AllocatedResourcesStatus { + repeatedStringForAllocatedResourcesStatus += strings.Replace(strings.Replace(f.String(), "ResourceStatus", "ResourceStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForAllocatedResourcesStatus += "}" keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources)) for k := range this.AllocatedResources { keysForAllocatedResources = append(keysForAllocatedResources, string(k)) @@ -26897,6 +27411,18 @@ func (this *ContainerStatus) String() string { `AllocatedResources:` + mapStringForAllocatedResources + `,`, `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, `VolumeMounts:` + repeatedStringForVolumeMounts + 
`,`, + `User:` + strings.Replace(this.User.String(), "ContainerUser", "ContainerUser", 1) + `,`, + `AllocatedResourcesStatus:` + repeatedStringForAllocatedResourcesStatus + `,`, + `}`, + }, "") + return s +} +func (this *ContainerUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerUser{`, + `Linux:` + strings.Replace(this.Linux.String(), "LinuxContainerUser", "LinuxContainerUser", 1) + `,`, `}`, }, "") return s @@ -27480,6 +28006,17 @@ func (this *ISCSIVolumeSource) String() string { }, "") return s } +func (this *ImageVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageVolumeSource{`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`, + `}`, + }, "") + return s +} func (this *KeyToPath) String() string { if this == nil { return "nil" @@ -27623,6 +28160,18 @@ func (this *LimitRangeSpec) String() string { }, "") return s } +func (this *LinuxContainerUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LinuxContainerUser{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `GID:` + fmt.Sprintf("%v", this.GID) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `}`, + }, "") + return s +} func (this *List) String() string { if this == nil { return "nil" @@ -27871,6 +28420,16 @@ func (this *NodeDaemonEndpoints) String() string { }, "") return s } +func (this *NodeFeatures) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeFeatures{`, + `SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`, + `}`, + }, "") + return s +} func (this *NodeList) String() string { if this == nil { return "nil" @@ -27914,6 +28473,7 @@ func (this *NodeRuntimeHandlerFeatures) String() string { } s := strings.Join([]string{`&NodeRuntimeHandlerFeatures{`, `RecursiveReadOnlyMounts:` + valueToStringGenerated(this.RecursiveReadOnlyMounts) + `,`, + `UserNamespaces:` + valueToStringGenerated(this.UserNamespaces) + `,`, `}`, }, "") return s @@ -28049,6 +28609,7 @@ func (this *NodeStatus) String() string { `VolumesAttached:` + repeatedStringForVolumesAttached + `,`, `Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, `RuntimeHandlers:` + repeatedStringForRuntimeHandlers + `,`, + `Features:` + strings.Replace(this.Features.String(), "NodeFeatures", "NodeFeatures", 1) + `,`, `}`, }, "") return s @@ -28577,7 +29138,8 @@ func (this *PodResourceClaim) String() string { } s := strings.Join([]string{`&PodResourceClaim{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "ClaimSource", "ClaimSource", 1), `&`, ``, 1) + `,`, + `ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`, + `ResourceClaimTemplateName:` + valueToStringGenerated(this.ResourceClaimTemplateName) + `,`, `}`, }, "") return s @@ -28624,6 +29186,7 @@ func (this *PodSecurityContext) String() string { `FSGroupChangePolicy:` + valueToStringGenerated(this.FSGroupChangePolicy) + `,`, `SeccompProfile:` + strings.Replace(this.SeccompProfile.String(), "SeccompProfile", "SeccompProfile", 1) + `,`, `AppArmorProfile:` + strings.Replace(this.AppArmorProfile.String(), "AppArmorProfile", "AppArmorProfile", 1) + `,`, + `SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`, `}`, }, "") return s @@ -29125,6 
+29688,7 @@ func (this *ResourceClaim) String() string { } s := strings.Join([]string{`&ResourceClaim{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, `}`, }, "") return s @@ -29141,6 +29705,17 @@ func (this *ResourceFieldSelector) String() string { }, "") return s } +func (this *ResourceHealth) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceHealth{`, + `ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`, + `Health:` + fmt.Sprintf("%v", this.Health) + `,`, + `}`, + }, "") + return s +} func (this *ResourceQuota) String() string { if this == nil { return "nil" @@ -29259,6 +29834,22 @@ func (this *ResourceRequirements) String() string { }, "") return s } +func (this *ResourceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]ResourceHealth{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceHealth", "ResourceHealth", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&ResourceStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `}`, + }, "") + return s +} func (this *SELinuxOptions) String() string { if this == nil { return "nil" @@ -29967,6 +30558,7 @@ func (this *VolumeSource) String() string { `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, `CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, `Ephemeral:` + strings.Replace(this.Ephemeral.String(), "EphemeralVolumeSource", "EphemeralVolumeSource", 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "ImageVolumeSource", "ImageVolumeSource", 1) + `,`, `}`, }, "") return s @@ -32985,122 +33577,6 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClaimSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClaimSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClaimSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceClaimName = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimTemplateName", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceClaimTemplateName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -37312,6 +37788,162 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &ContainerUser{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourcesStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{}) + if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerUser) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerUser: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerUser: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxContainerUser{} + } + if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -43256,11 +43888,369 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := HostPathType(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IQN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lun |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DiscoveryCHAPAuth = bool(v != 0) + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionCHAPAuth = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -43288,8 +44278,8 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := HostPathType(dAtA[iNdEx:postIndex]) - m.Type = &s + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -43312,7 +44302,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -43335,10 +44325,10 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -43590,7 +44580,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.SecretRef == nil { - m.SecretRef = &SecretReference{} + m.SecretRef = &LocalObjectReference{} } if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -43670,7 +44660,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { 
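The generated decoder being added at this point is for the new core/v1 ImageVolumeSource message (two fields, Reference and PullPolicy), which the earlier hunks also wire into VolumeSource, Size(), and String(). A minimal sketch of how the new volume type is populated through the public API, assuming k8s.io/api v0.31.x as pulled in by this change; the volume name and image reference below are illustrative, not taken from this repository:

    package main

    import corev1 "k8s.io/api/core/v1"

    // ociArtifactVolume builds a pod volume backed by the new image volume type.
    func ociArtifactVolume() corev1.Volume {
    	return corev1.Volume{
    		Name: "models", // hypothetical volume name
    		VolumeSource: corev1.VolumeSource{
    			// New in core/v1 v0.31: mount an OCI image/artifact as a read-only volume.
    			Image: &corev1.ImageVolumeSource{
    				Reference:  "registry.example.com/models/llm:v1", // hypothetical image reference
    				PullPolicy: corev1.PullIfNotPresent,
    			},
    		},
    	}
    }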
+func (m *ImageVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -43693,182 +44683,15 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ImageVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ImageVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TargetPortal = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IQN = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) - } - m.Lun = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lun |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -43896,87 +44719,11 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + m.Reference = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DiscoveryCHAPAuth = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SessionCHAPAuth = bool(v != 0) - case 12: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44004,8 +44751,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.InitiatorName = &s + m.PullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -45344,17 +46090,218 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group") + return fmt.Errorf("proto: LimitRangeList: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, LimitRange{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Limits = append(m.Limits, LimitRangeItem{}) + if err := m.Limits[len(m.Limits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxContainerUser) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxContainerUser: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LinuxContainerUser: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } - var msglen int + m.UID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45364,30 +46311,16 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.UID |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GID", wireType) } - var msglen int + m.GID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45397,110 +46330,87 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.GID |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, LimitRange{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + case 3: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.SupplementalGroups = append(m.SupplementalGroups, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } } + elementCount = count + if elementCount != 0 && len(m.SupplementalGroups) == 0 { + m.SupplementalGroups = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Limits = append(m.Limits, LimitRangeItem{}) - if err := m.Limits[len(m.Limits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47950,17 +48860,100 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group") + return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.KubeletEndpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeFeatures) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeFeatures: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeFeatures: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47970,25 +48963,13 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.KubeletEndpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.SupplementalGroupsPolicy = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -48377,6 +49358,27 @@ func (m *NodeRuntimeHandlerFeatures) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.RecursiveReadOnlyMounts = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNamespaces", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.UserNamespaces = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -49637,6 +50639,42 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Features == nil { + m.Features = &NodeFeatures{} + } + if err := m.Features.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -56370,11 +57408,11 @@ func (m *PodResourceClaim) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -56384,24 +57422,57 @@ func (m *PodResourceClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + s := string(dAtA[iNdEx:postIndex]) + m.ResourceClaimName = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimTemplateName", wireType) } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceClaimTemplateName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -57018,6 +58089,39 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SupplementalGroupsPolicy(dAtA[iNdEx:postIndex]) + m.SupplementalGroupsPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -62585,7 +63689,121 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaim) Unmarshal(dAtA []byte) error { +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -62608,15 +63826,15 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -62644,7 +63862,72 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -62667,7 +63950,7 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { +func (m *ResourceHealth) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -62690,15 +63973,15 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceHealth: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceHealth: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -62726,11 +64009,11 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerName = string(dAtA[iNdEx:postIndex]) + m.ResourceID = ResourceID(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Health", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -62758,40 +64041,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Health = ResourceHealthStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -63977,6 +65227,122 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) 
error { } return nil } +func (m *ResourceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, ResourceHealth{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -73029,6 +74395,42 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ImageVolumeSource{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/core/v1/generated.proto 
b/vendor/k8s.io/api/core/v1/generated.proto index f3b47c722d..68ac80ed0b 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -126,20 +126,24 @@ message AzureDiskVolumeSource { // cachingMode is the Host Caching mode: None, Read Only, Read Write. // +optional + // +default=ref(AzureDataDiskCachingReadWrite) optional string cachingMode = 3; // fsType is Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional + // +default="ext4" optional string fsType = 4; // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional + // +default=false optional bool readOnly = 5; // kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + // +default=ref(AzureSharedBlobDisk) optional string kind = 6; } @@ -182,7 +186,7 @@ message Binding { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The target object that you want to bind to the standard object. optional ObjectReference target = 2; @@ -422,30 +426,6 @@ message CinderVolumeSource { optional LocalObjectReference secretRef = 4; } -// ClaimSource describes a reference to a ResourceClaim. -// -// Exactly one of these fields should be set. Consumers of this type must -// treat an empty object as if it has an unknown value. -message ClaimSource { - // ResourceClaimName is the name of a ResourceClaim object in the same - // namespace as this pod. - optional string resourceClaimName = 1; - - // ResourceClaimTemplateName is the name of a ResourceClaimTemplate - // object in the same namespace as this pod. - // - // The template will be used to create a new ResourceClaim, which will - // be bound to this pod. When this pod is deleted, the ResourceClaim - // will also be deleted. The pod name and resource name, along with a - // generated component, will be used to form a unique name for the - // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - // - // This field is immutable and no changes will be made to the - // corresponding ResourceClaim by the control plane after creating the - // ResourceClaim. - optional string resourceClaimTemplateName = 2; -} - // ClientIPConfig represents the configurations of Client IP based session affinity. message ClientIPConfig { // timeoutSeconds specifies the seconds of ClientIP type session sticky time. @@ -475,7 +455,7 @@ message ClusterTrustBundleProjection { // interpreted as "match nothing". If set but empty, interpreted as "match // everything". // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3; // If true, don't block pod startup if the referenced ClusterTrustBundle(s) // aren't available. If using name, then the named ClusterTrustBundle is @@ -516,7 +496,7 @@ message ComponentStatus { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // List of component conditions observed // +optional @@ -533,7 +513,7 @@ message ComponentStatusList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ComponentStatus objects. repeated ComponentStatus items = 2; @@ -544,7 +524,7 @@ message ConfigMap { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Immutable, if set to true, ensures that data stored in the ConfigMap cannot // be updated (only object metadata can be modified). @@ -604,7 +584,7 @@ message ConfigMapKeySelector { message ConfigMapList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ConfigMaps. repeated ConfigMap items = 2; @@ -984,7 +964,7 @@ message ContainerState { message ContainerStateRunning { // Time at which the container was last (re-)started // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; } // ContainerStateTerminated is a terminated state of a container. @@ -1006,11 +986,11 @@ message ContainerStateTerminated { // Time at which previous execution of the container started // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; // Time at which the container last terminated // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; // Container's ID in the format '://' // +optional @@ -1093,7 +1073,7 @@ message ContainerStatus { // and after successfully admitting desired pod resize. // +featureGate=InPlacePodVerticalScaling // +optional - map allocatedResources = 10; + map allocatedResources = 10; // Resources represents the compute resource requests and limits that have been successfully // enacted on the running container after it has been started or has been successfully resized. @@ -1109,6 +1089,29 @@ message ContainerStatus { // +listMapKey=mountPath // +featureGate=RecursiveReadOnlyMounts repeated VolumeMountStatus volumeMounts = 12; + + // User represents user identity information initially attached to the first process of the container + // +featureGate=SupplementalGroupsPolicy + // +optional + optional ContainerUser user = 13; + + // AllocatedResourcesStatus represents the status of various resources + // allocated for this Pod. 
+ // +featureGate=ResourceHealthStatus + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + repeated ResourceStatus allocatedResourcesStatus = 14; +} + +// ContainerUser represents user identity information +message ContainerUser { + // Linux holds user identity information initially attached to the first process of the containers in Linux. + // Note that the actual running identity can be changed if the process has enough privilege to do so. + // +optional + optional LinuxContainerUser linux = 1; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -1188,7 +1191,7 @@ message EmptyDirVolumeSource { // The default is nil which means that the limit is undefined. // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; } // EndpointAddress is a tuple that describes single IP address. @@ -1300,7 +1303,7 @@ message Endpoints { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The set of all endpoints is the union of all subsets. Addresses are placed into // subsets according to the IPs they share. A single address with multiple ports, @@ -1319,7 +1322,7 @@ message EndpointsList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of endpoints. repeated Endpoints items = 2; @@ -1622,7 +1625,7 @@ message EphemeralVolumeSource { message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The object that this event is about. optional ObjectReference involvedObject = 2; @@ -1644,11 +1647,11 @@ message Event { // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; // The time at which the most recent occurrence of this event was recorded. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; // The number of times this event has occurred. // +optional @@ -1660,7 +1663,7 @@ message Event { // Time when this Event was first observed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; // Data about the Event series this event represents or nil if it's a singleton Event. // +optional @@ -1688,7 +1691,7 @@ message EventList { // Standard list metadata. 
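// A minimal sketch (editor's illustration, assuming the k8s.io/api v0.31.x types
// vendored above): reading the new ContainerStatus.User field, which is populated
// when the SupplementalGroupsPolicy feature gate is enabled. The status value is
// stubbed inline; normally it would come from a client-go Get/List call.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	status := corev1.ContainerStatus{
		Name: "app",
		User: &corev1.ContainerUser{
			Linux: &corev1.LinuxContainerUser{
				UID:                1000,
				GID:                1000,
				SupplementalGroups: []int64{1000, 2000},
			},
		},
	}
	// Identity attached to the first process of the container, if reported.
	if u := status.User; u != nil && u.Linux != nil {
		fmt.Printf("container %q runs as uid=%d gid=%d groups=%v\n",
			status.Name, u.Linux.UID, u.Linux.GID, u.Linux.SupplementalGroups)
	}
}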
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of events repeated Event items = 2; @@ -1701,7 +1704,7 @@ message EventSeries { optional int32 count = 1; // Time of the last occurrence observed - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; } // EventSource contains information for an event. @@ -1954,7 +1957,7 @@ message HTTPGetAction { // Name or number of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; // Host name to connect to, defaults to the pod IP. You probably want to set // "Host" in httpHeaders instead. @@ -1997,6 +2000,7 @@ message HostAlias { // HostIP represents a single IP address allocated to the host. message HostIP { // IP is the IP address assigned to the host + // +required optional string ip = 1; } @@ -2032,6 +2036,7 @@ message ISCSIPersistentVolumeSource { // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" optional string iscsiInterface = 4; // fsType is the filesystem type of the volume that you want to mount. @@ -2089,6 +2094,7 @@ message ISCSIVolumeSource { // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" optional string iscsiInterface = 4; // fsType is the filesystem type of the volume that you want to mount. @@ -2129,6 +2135,26 @@ message ISCSIVolumeSource { optional string initiatorName = 12; } +// ImageVolumeSource represents a image volume resource. +message ImageVolumeSource { + // Required: Image or artifact reference to be used. + // Behaves in the same way as pod.spec.containers[*].image. + // Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + optional string reference = 1; + + // Policy for pulling OCI objects. Possible values are: + // Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // +optional + optional string pullPolicy = 2; +} + // Maps a string key to a path within a volume. message KeyToPath { // key is the key to project. @@ -2202,7 +2228,7 @@ message LimitRange { // Standard object's metadata. 
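// A minimal sketch (editor's illustration, assuming the k8s.io/api v0.31.x types
// vendored above): mounting an OCI artifact as a volume via the new
// ImageVolumeSource (field 30 in the generated unmarshaler earlier in this diff).
// The container image and volume names are invented for the example.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "app",
				Image: "registry.example.com/app:1.0",
				VolumeMounts: []corev1.VolumeMount{{
					Name:      "data",
					MountPath: "/data",
				}},
			}},
			Volumes: []corev1.Volume{{
				Name: "data",
				VolumeSource: corev1.VolumeSource{
					Image: &corev1.ImageVolumeSource{
						Reference:  "registry.example.com/data-artifact:1.0",
						PullPolicy: corev1.PullIfNotPresent,
					},
				},
			}},
		},
	}
	fmt.Println("image volume reference:", pod.Spec.Volumes[0].Image.Reference)
}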
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the limits enforced. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -2217,23 +2243,23 @@ message LimitRangeItem { // Max usage constraints on this kind by resource name. // +optional - map max = 2; + map max = 2; // Min usage constraints on this kind by resource name. // +optional - map min = 3; + map min = 3; // Default resource requirement limit value by resource name if resource limit is omitted. // +optional - map default = 4; + map default = 4; // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. // +optional - map defaultRequest = 5; + map defaultRequest = 5; // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. // +optional - map maxLimitRequestRatio = 6; + map maxLimitRequestRatio = 6; } // LimitRangeList is a list of LimitRange items. @@ -2241,7 +2267,7 @@ message LimitRangeList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of LimitRange objects. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -2255,15 +2281,29 @@ message LimitRangeSpec { repeated LimitRangeItem limits = 1; } +// LinuxContainerUser represents user identity information in Linux containers +message LinuxContainerUser { + // UID is the primary uid initially attached to the first process in the container + optional int64 uid = 1; + + // GID is the primary gid initially attached to the first process in the container + optional int64 gid = 2; + + // SupplementalGroups are the supplemental groups initially attached to the first process in the container + // +optional + // +listType=atomic + repeated int64 supplementalGroups = 3; +} + // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of objects - repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } // LoadBalancerIngress represents the status of a load-balancer ingress point: @@ -2377,7 +2417,7 @@ message Namespace { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -2399,7 +2439,7 @@ message NamespaceCondition { optional string status = 2; // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // +optional optional string reason = 5; @@ -2413,7 +2453,7 @@ message NamespaceList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Namespace objects in the list. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ @@ -2451,7 +2491,7 @@ message Node { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -2509,11 +2549,11 @@ message NodeCondition { // Last time we got an update on a given condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; // Last time the condition transit from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // (brief) reason for the condition's last transition. // +optional @@ -2588,12 +2628,21 @@ message NodeDaemonEndpoints { optional DaemonEndpoint kubeletEndpoint = 1; } +// NodeFeatures describes the set of features implemented by the CRI implementation. +// The features contained in the NodeFeatures should depend only on the cri implementation +// independent of runtime handlers. +message NodeFeatures { + // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. + // +optional + optional bool supplementalGroupsPolicy = 1; +} + // NodeList is the whole list of all Nodes which have been registered with master. message NodeList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of nodes repeated Node items = 2; @@ -2618,12 +2667,17 @@ message NodeRuntimeHandler { optional NodeRuntimeHandlerFeatures features = 2; } -// NodeRuntimeHandlerFeatures is a set of runtime features. +// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler. message NodeRuntimeHandlerFeatures { // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts. // +featureGate=RecursiveReadOnlyMounts // +optional optional bool recursiveReadOnlyMounts = 1; + + // UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes. 
+ // +featureGate=UserNamespacesSupport + // +optional + optional bool userNamespaces = 2; } // A node selector represents the union of the results of one or more label queries @@ -2713,14 +2767,14 @@ message NodeSpec { // NodeStatus is information about the current status of a node. message NodeStatus { // Capacity represents the total resources of a node. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // More info: https://kubernetes.io/docs/reference/node/node-status/#capacity // +optional - map capacity = 1; + map capacity = 1; // Allocatable represents the resources of a node that are available for scheduling. // Defaults to Capacity. // +optional - map allocatable = 2; + map allocatable = 2; // NodePhase is the recently observed lifecycle phase of the node. // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase @@ -2784,9 +2838,15 @@ message NodeStatus { // The available runtime handlers. // +featureGate=RecursiveReadOnlyMounts + // +featureGate=UserNamespacesSupport // +optional // +listType=atomic repeated NodeRuntimeHandler runtimeHandlers = 12; + + // Features describes the set of features implemented by the CRI implementation. + // +featureGate=SupplementalGroupsPolicy + // +optional + optional NodeFeatures features = 13; } // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. @@ -2816,7 +2876,7 @@ message NodeSystemInfo { // Kubelet Version reported by the node. optional string kubeletVersion = 7; - // KubeProxy Version reported by the node. + // Deprecated: KubeProxy Version reported by the node. optional string kubeProxyVersion = 8; // The Operating System reported by the node @@ -2904,7 +2964,7 @@ message PersistentVolume { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. @@ -2925,7 +2985,7 @@ message PersistentVolumeClaim { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the desired characteristics of a volume requested by a pod author. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims @@ -2947,11 +3007,11 @@ message PersistentVolumeClaimCondition { // lastProbeTime is the time we probed the condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // lastTransitionTime is the time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // reason is a unique, this should be a short, machine understandable string that gives the reason // for condition's last transition. If it reports "Resizing" that means the underlying @@ -2969,7 +3029,7 @@ message PersistentVolumeClaimList { // Standard list metadata. 
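// A minimal sketch (editor's illustration, assuming the k8s.io/api v0.31.x types
// vendored above): inspecting the new NodeStatus.Features field and the
// per-runtime-handler UserNamespaces flag. The node value is stubbed for the
// example; the handler name "runc" is only illustrative.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	yes := true
	node := corev1.Node{
		Status: corev1.NodeStatus{
			Features: &corev1.NodeFeatures{SupplementalGroupsPolicy: &yes},
			RuntimeHandlers: []corev1.NodeRuntimeHandler{{
				Name: "runc",
				Features: &corev1.NodeRuntimeHandlerFeatures{
					RecursiveReadOnlyMounts: &yes,
					UserNamespaces:          &yes,
				},
			}},
		},
	}
	if f := node.Status.Features; f != nil && f.SupplementalGroupsPolicy != nil {
		fmt.Println("CRI supports SupplementalGroupsPolicy:", *f.SupplementalGroupsPolicy)
	}
	for _, h := range node.Status.RuntimeHandlers {
		if h.Features != nil && h.Features.UserNamespaces != nil {
			fmt.Printf("handler %q user namespaces: %v\n", h.Name, *h.Features.UserNamespaces)
		}
	}
}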
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of persistent volume claims. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims @@ -2987,7 +3047,7 @@ message PersistentVolumeClaimSpec { // selector is a label query over volumes to consider for binding. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // resources represents the minimum resources the volume should have. // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements @@ -3059,7 +3119,7 @@ message PersistentVolumeClaimSpec { // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource // exists. // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). // +featureGate=VolumeAttributesClass // +optional optional string volumeAttributesClassName = 9; @@ -3079,7 +3139,7 @@ message PersistentVolumeClaimStatus { // capacity represents the actual resources of the underlying volume. // +optional - map capacity = 3; + map capacity = 3; // conditions is the current Condition of persistent volume claim. If underlying persistent volume is being // resized then the Condition will be set to 'Resizing'. @@ -3114,7 +3174,7 @@ message PersistentVolumeClaimStatus { // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. // +featureGate=RecoverVolumeExpansionFailure // +optional - map allocatedResources = 5; + map allocatedResources = 5; // allocatedResourceStatuses stores status of resource being resized for the given PVC. // Key names follow standard Kubernetes label syntax. Valid values are either: @@ -3158,14 +3218,14 @@ message PersistentVolumeClaimStatus { // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional string currentVolumeAttributesClassName = 8; // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. // When this is unset, there is no ModifyVolume operation being attempted. - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional ModifyVolumeStatus modifyVolumeStatus = 9; @@ -3179,7 +3239,7 @@ message PersistentVolumeClaimTemplate { // validation. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The specification for the PersistentVolumeClaim. 
The entire content is // copied unchanged into the PVC that gets created from this @@ -3208,7 +3268,7 @@ message PersistentVolumeList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of persistent volumes. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes @@ -3327,7 +3387,7 @@ message PersistentVolumeSpec { // capacity is the description of the persistent volume's resources and capacity. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity // +optional - map capacity = 1; + map capacity = 1; // persistentVolumeSource is the actual volume backing the persistent volume. optional PersistentVolumeSource persistentVolumeSource = 2; @@ -3382,7 +3442,7 @@ message PersistentVolumeSpec { // after a volume has been updated successfully to a new class. // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound // PersistentVolumeClaims during the binding process. - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional string volumeAttributesClassName = 10; @@ -3406,10 +3466,8 @@ message PersistentVolumeStatus { // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). - // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4; } // Represents a Photon Controller persistent disk resource. @@ -3429,7 +3487,7 @@ message Pod { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -3482,7 +3540,7 @@ message PodAffinityTerm { // A label query over a set of resources, in this case pods. // If it's null, this PodAffinityTerm matches with no Pods. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; // namespaces specifies a static list of namespace names that the term applies to. // The term is applied to the union of the namespaces listed in this field @@ -3505,7 +3563,7 @@ message PodAffinityTerm { // null selector and null or empty namespaces list means "this pod's namespace". // An empty selector ({}) matches all namespaces. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4; // MatchLabelKeys is a set of pod label keys to select which pods will // be taken into consideration. The keys are used to lookup values from the @@ -3515,7 +3573,8 @@ message PodAffinityTerm { // pod labels will be ignored. The default value is empty. // The same key is forbidden to exist in both matchLabelKeys and labelSelector. // Also, matchLabelKeys cannot be set when labelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // // +listType=atomic // +optional repeated string matchLabelKeys = 5; @@ -3528,7 +3587,8 @@ message PodAffinityTerm { // pod labels will be ignored. The default value is empty. // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. // Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // // +listType=atomic // +optional repeated string mismatchLabelKeys = 6; @@ -3607,11 +3667,11 @@ message PodCondition { // Last time we probed the condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // Unique, one-word, CamelCase reason for the condition's last transition. // +optional @@ -3693,6 +3753,7 @@ message PodExecOptions { // PodIP represents a single IP address allocated to the pod. message PodIP { // IP is the IP address assigned to the pod + // +required optional string ip = 1; } @@ -3701,7 +3762,7 @@ message PodList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pods. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md @@ -3734,7 +3795,7 @@ message PodLogOptions { // If this value is in the future, no logs will be returned. // Only one of sinceSeconds or sinceTime may be specified. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line // of log output. Defaults to false. @@ -3798,7 +3859,10 @@ message PodReadinessGate { optional string conditionType = 1; } -// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. +// PodResourceClaim references exactly one ResourceClaim, either directly +// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim +// for the pod. +// // It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. 
// Containers that need access to the ResourceClaim reference it with this name. message PodResourceClaim { @@ -3806,8 +3870,29 @@ message PodResourceClaim { // This must be a DNS_LABEL. optional string name = 1; - // Source describes where to find the ResourceClaim. - optional ClaimSource source = 2; + // ResourceClaimName is the name of a ResourceClaim object in the same + // namespace as this pod. + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + optional string resourceClaimName = 3; + + // ResourceClaimTemplateName is the name of a ResourceClaimTemplate + // object in the same namespace as this pod. + // + // The template will be used to create a new ResourceClaim, which will + // be bound to this pod. When this pod is deleted, the ResourceClaim + // will also be deleted. The pod name and resource name, along with a + // generated component, will be used to form a unique name for the + // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + // + // This field is immutable and no changes will be made to the + // corresponding ResourceClaim by the control plane after creating the + // ResourceClaim. + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + optional string resourceClaimTemplateName = 4; } // PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim @@ -3820,7 +3905,7 @@ message PodResourceClaimStatus { optional string name = 1; // ResourceClaimName is the name of the ResourceClaim that was - // generated for the Pod in the namespace of the Pod. It this is + // generated for the Pod in the namespace of the Pod. If this is // unset, then generating a ResourceClaim was not necessary. The // pod.spec.resourceClaims entry can be ignored in this case. // @@ -3882,17 +3967,29 @@ message PodSecurityContext { // +optional optional bool runAsNonRoot = 3; - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID, the fsGroup (if specified), and group memberships - // defined in the container image for the uid of the container process. If unspecified, - // no additional groups are added to any container. Note that group memberships - // defined in the container image for the uid of the container process are still effective, - // even if they are not included in this list. + // A list of groups applied to the first process run in each container, in + // addition to the container's primary GID and fsGroup (if specified). If + // the SupplementalGroupsPolicy feature is enabled, the + // supplementalGroupsPolicy field determines whether these are in addition + // to or instead of any group memberships defined in the container image. + // If unspecified, no additional groups are added, though group memberships + // defined in the container image may still be used, depending on the + // supplementalGroupsPolicy field. // Note that this field cannot be set when spec.os.name is windows. // +optional // +listType=atomic repeated int64 supplementalGroups = 4; + // Defines how supplemental groups of the first container processes are calculated. + // Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + // (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + // and the container runtime must implement support for this feature. + // Note that this field cannot be set when spec.os.name is windows. 
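// A minimal sketch (editor's illustration, assuming the k8s.io/api v0.31.x types
// vendored above): the flattened PodResourceClaim now carries ResourceClaimName
// or ResourceClaimTemplateName directly (exactly one of the two must be set),
// and containers reference the claim by name, optionally narrowing it to a
// single request. All object names here are invented for the example.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	templateName := "gpu-claim-template"
	spec := corev1.PodSpec{
		ResourceClaims: []corev1.PodResourceClaim{{
			Name: "gpu",
			// Exactly one of ResourceClaimName / ResourceClaimTemplateName is set.
			ResourceClaimTemplateName: &templateName,
		}},
		Containers: []corev1.Container{{
			Name:  "app",
			Image: "registry.example.com/app:1.0",
			Resources: corev1.ResourceRequirements{
				Claims: []corev1.ResourceClaim{{
					Name:    "gpu",
					Request: "single-gpu", // new in v1.31; empty means the whole claim
				}},
			},
		}},
	}
	fmt.Println("claim template:", *spec.ResourceClaims[0].ResourceClaimTemplateName)
}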
+ // TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34 + // +featureGate=SupplementalGroupsPolicy + // +optional + optional string supplementalGroupsPolicy = 12; + // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: @@ -3939,7 +4036,7 @@ message PodSecurityContext { message PodSignature { // Reference to controller whose pods should avoid this node. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; } // PodSpec is a description of a pod. @@ -4048,9 +4145,11 @@ message PodSpec { // +optional optional bool automountServiceAccountToken = 21; - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. + // NodeName indicates in which node this pod is scheduled. + // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + // This field should not be used to express a desire for the pod to be scheduled on a specific node. + // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename // +optional optional string nodeName = 10; @@ -4189,7 +4288,7 @@ message PodSpec { // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional - map overhead = 32; + map overhead = 32; // TopologySpreadConstraints describes how a group of pods ought to spread across topology // domains. Scheduler will schedule pods in a way which abides by the constraints. @@ -4230,6 +4329,7 @@ message PodSpec { // - spec.securityContext.runAsUser // - spec.securityContext.runAsGroup // - spec.securityContext.supplementalGroups + // - spec.securityContext.supplementalGroupsPolicy // - spec.containers[*].securityContext.appArmorProfile // - spec.containers[*].securityContext.seLinuxOptions // - spec.containers[*].securityContext.seccompProfile @@ -4375,7 +4475,7 @@ message PodStatus { // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; // The list has one entry per init container in the manifest. The most recent successful // init container will have ready = true, the most recently started container will have @@ -4423,7 +4523,7 @@ message PodStatusResult { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Most recently observed status of the pod. // This data may not be up to date. @@ -4439,7 +4539,7 @@ message PodTemplate { // Standard object's metadata. 
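// A minimal sketch (editor's illustration, assuming the k8s.io/api v0.31.x types
// vendored above): opting a pod into "Strict" supplemental group handling, so
// group memberships defined in the container image are not merged in (the
// default when unset is "Merge"). Requires the SupplementalGroupsPolicy gate.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	strict := corev1.SupplementalGroupsPolicyStrict
	sc := &corev1.PodSecurityContext{
		SupplementalGroups:       []int64{2000, 3000},
		SupplementalGroupsPolicy: &strict, // defaults to Merge when nil
	}
	fmt.Println("supplementalGroupsPolicy:", *sc.SupplementalGroupsPolicy)
}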
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4452,7 +4552,7 @@ message PodTemplateList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pod templates repeated PodTemplate items = 2; @@ -4463,7 +4563,7 @@ message PodTemplateSpec { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4525,7 +4625,7 @@ message PreferAvoidPodsEntry { // Time at which this entry was added to the list. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; // (brief) reason why this entry was added to the list. // +optional @@ -4614,7 +4714,8 @@ message ProbeHandler { // Represents a projected volume source message ProjectedVolumeSource { - // sources is the list of volume projections + // sources is the list of volume projections. Each entry in this list + // handles one source. // +optional // +listType=atomic repeated VolumeProjection sources = 1; @@ -4685,18 +4786,21 @@ message RBDPersistentVolumeSource { // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" optional string pool = 4; // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" optional string user = 5; // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" optional string keyring = 6; // secretRef is name of the authentication secret for RBDUser. If provided @@ -4737,18 +4841,21 @@ message RBDVolumeSource { // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" optional string pool = 4; // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" optional string user = 5; // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" optional string keyring = 6; // secretRef is name of the authentication secret for RBDUser. If provided @@ -4770,7 +4877,7 @@ message RangeAllocation { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Range is string that identifies the range represented by 'data'. optional string range = 2; @@ -4785,7 +4892,7 @@ message ReplicationController { // be the same as the Pod(s) that the replication controller manages. // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4811,7 +4918,7 @@ message ReplicationControllerCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -4827,7 +4934,7 @@ message ReplicationControllerList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of replication controllers. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -4904,6 +5011,13 @@ message ResourceClaim { // the Pod where this field is used. It makes that resource available // inside a container. optional string name = 1; + + // Request is the name chosen for a request in the referenced claim. + // If empty, everything from the claim is made available, otherwise + // only the result of this request. + // + // +optional + optional string request = 2; } // ResourceFieldSelector represents container resources (cpu, memory) and their output format @@ -4918,7 +5032,26 @@ message ResourceFieldSelector { // Specifies the output format of the exposed resources, defaults to "1" // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; +} + +// ResourceHealth represents the health of a resource. It has the latest device health information. +// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP. +message ResourceHealth { + // ResourceID is the unique identifier of the resource. See the ResourceID type for more information. + optional string resourceID = 1; + + // Health of the resource. + // can be one of: + // - Healthy: operates as normal + // - Unhealthy: reported unhealthy. We consider this a temporary health issue + // since we do not have a mechanism today to distinguish + // temporary and permanent issues. + // - Unknown: The status cannot be determined. + // For example, Device Plugin got unregistered and hasn't been re-registered since. + // + // In future we may want to introduce the PermanentlyUnhealthy Status. 
+ optional string health = 2; } // ResourceQuota sets aggregate quota restrictions enforced per namespace @@ -4926,7 +5059,7 @@ message ResourceQuota { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired quota. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4944,7 +5077,7 @@ message ResourceQuotaList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ResourceQuota objects. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ @@ -4956,7 +5089,7 @@ message ResourceQuotaSpec { // hard is the set of desired hard limits for each named resource. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional - map hard = 1; + map hard = 1; // A collection of filters that must match each object tracked by a quota. // If not specified, the quota matches all objects. @@ -4976,11 +5109,11 @@ message ResourceQuotaStatus { // Hard is the set of enforced hard limits for each named resource. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional - map hard = 1; + map hard = 1; // Used is the current observed total usage of the resource in the namespace. // +optional - map used = 2; + map used = 2; } // ResourceRequirements describes the compute resource requirements. @@ -4988,14 +5121,14 @@ message ResourceRequirements { // Limits describes the maximum amount of compute resources allowed. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map limits = 1; + map limits = 1; // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map requests = 2; + map requests = 2; // Claims lists the names of resources, defined in spec.resourceClaims, // that are used by this container. @@ -5012,6 +5145,20 @@ message ResourceRequirements { repeated ResourceClaim claims = 3; } +message ResourceStatus { + // Name of the resource. Must be unique within the pod and match one of the resources from the pod spec. + // +required + optional string name = 1; + + // List of unique Resources health. Each element in the list contains an unique resource ID and resource health. + // At a minimum, ResourceID must uniquely identify the Resource + // allocated to the Pod on the Node for the lifetime of a Pod. + // See ResourceID type for it's definition. + // +listType=map + // +listMapKey=resourceID + repeated ResourceHealth resources = 2; +} + // SELinuxOptions are the labels to be applied to the container message SELinuxOptions { // User is a SELinux user label that applies to the container. 
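// A minimal sketch (editor's illustration, assuming the k8s.io/api v0.31.x types
// vendored above): walking the new allocatedResourcesStatus list on a container
// status to surface unhealthy devices. The resource name and device IDs are
// invented; real values come from the device plugin / DRA driver.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	status := corev1.ContainerStatus{
		Name: "app",
		AllocatedResourcesStatus: []corev1.ResourceStatus{{
			Name: corev1.ResourceName("vendor.example/gpu"),
			Resources: []corev1.ResourceHealth{
				{ResourceID: "gpu-0", Health: corev1.ResourceHealthStatusHealthy},
				{ResourceID: "gpu-1", Health: corev1.ResourceHealthStatusUnhealthy},
			},
		}},
	}
	for _, rs := range status.AllocatedResourcesStatus {
		for _, r := range rs.Resources {
			if r.Health != corev1.ResourceHealthStatusHealthy {
				fmt.Printf("container %q: resource %s device %s is %s\n",
					status.Name, rs.Name, r.ResourceID, r.Health)
			}
		}
	}
}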
@@ -5058,6 +5205,7 @@ message ScaleIOPersistentVolumeSource { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" optional string storageMode = 7; // volumeName is the name of a volume already created in the ScaleIO system @@ -5069,6 +5217,7 @@ message ScaleIOPersistentVolumeSource { // Ex. "ext4", "xfs", "ntfs". // Default is "xfs" // +optional + // +default="xfs" optional string fsType = 9; // readOnly defaults to false (read/write). ReadOnly here will force @@ -5104,6 +5253,7 @@ message ScaleIOVolumeSource { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" optional string storageMode = 7; // volumeName is the name of a volume already created in the ScaleIO system @@ -5115,6 +5265,7 @@ message ScaleIOVolumeSource { // Ex. "ext4", "xfs", "ntfs". // Default is "xfs". // +optional + // +default="xfs" optional string fsType = 9; // readOnly Defaults to false (read/write). ReadOnly here will force @@ -5179,7 +5330,7 @@ message Secret { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Immutable, if set to true, ensures that data stored in the Secret cannot // be updated (only object metadata can be modified). @@ -5242,7 +5393,7 @@ message SecretList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of secret objects. // More info: https://kubernetes.io/docs/concepts/configuration/secret @@ -5398,7 +5549,7 @@ message SecurityContext { optional bool allowPrivilegeEscalation = 7; // procMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for + // The default value is Default which uses the container runtime defaults for // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. // Note that this field cannot be set when spec.os.name is windows. @@ -5433,7 +5584,7 @@ message Service { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -5456,7 +5607,7 @@ message ServiceAccount { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. 
// Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true". @@ -5489,7 +5640,7 @@ message ServiceAccountList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ServiceAccounts. // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ @@ -5527,7 +5678,7 @@ message ServiceList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of services repeated Service items = 2; @@ -5579,7 +5730,7 @@ message ServicePort { // omitted or set equal to the 'port' field. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; // The port on each node on which this service is exposed when type is // NodePort or LoadBalancer. Usually assigned by the system. If a value is @@ -5864,7 +6015,7 @@ message ServiceStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; } // SessionAffinityConfig represents the configurations of session affinity. @@ -5958,7 +6109,7 @@ message TCPSocketAction { // Number or name of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; // Optional: Host name to connect to, defaults to the pod IP. // +optional @@ -5983,7 +6134,7 @@ message Taint { // TimeAdded represents the time at which the taint was added. // It is only written for NoExecute taints. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; } // The pod this Toleration is attached to tolerates any taint that matches @@ -6107,7 +6258,7 @@ message TopologySpreadConstraint { // Pods that match this label selector are counted to determine the number of pods // in their corresponding topology domain. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; // MinDomains indicates a minimum number of eligible domains. // When the number of eligible domains with matching topology keys is less than minDomains, @@ -6313,7 +6464,8 @@ message VolumeNodeAffinity { optional NodeSelector required = 1; } -// Projection that may be projected along with other supported volume types +// Projection that may be projected along with other supported volume types. +// Exactly one of these fields must be set. 
message VolumeProjection { // secret information about the secret data to project // +optional @@ -6355,14 +6507,14 @@ message VolumeResourceRequirements { // Limits describes the maximum amount of compute resources allowed. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map limits = 1; + map limits = 1; // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map requests = 2; + map requests = 2; } // Represents the source of a volume to mount. @@ -6530,6 +6682,24 @@ message VolumeSource { // // +optional optional EphemeralVolumeSource ephemeral = 29; + + // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + // The volume is resolved at pod startup depending on which PullPolicy value is provided: + // + // - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // + // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + // A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + // The volume will be mounted read-only (ro) and non-executable files (noexec). + // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + // +featureGate=ImageVolume + // +optional + optional ImageVolumeSource image = 30; } // Represents a vSphere volume resource. diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 328df9a7b7..3a74138bae 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -181,6 +181,23 @@ type VolumeSource struct { // // +optional Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"` + // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + // The volume is resolved at pod startup depending on which PullPolicy value is provided: + // + // - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // - Never: the kubelet never pulls the reference and only uses a local image or artifact. 
Container creation will fail if the reference isn't present. + // - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // + // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + // A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + // The volume will be mounted read-only (ro) and non-executable files (noexec). + // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + // +featureGate=ImageVolume + // +optional + Image *ImageVolumeSource `json:"image,omitempty" protobuf:"bytes,30,opt,name=image"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -295,6 +312,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolume (PV) is a storage resource provisioned by an administrator. // It is analogous to a node. @@ -371,7 +389,7 @@ type PersistentVolumeSpec struct { // after a volume has been updated successfully to a new class. // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound // PersistentVolumeClaims during the binding process. - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"` @@ -425,13 +443,12 @@ type PersistentVolumeStatus struct { Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). - // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeList is a list of PersistentVolume items. 
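The image volume field added to VolumeSource above (its ImageVolumeSource type is defined later in this file) is populated like any other volume source. A sketch under the assumption that the ImageVolume feature gate is enabled on the cluster; the image references and names are hypothetical.

package example

import corev1 "k8s.io/api/core/v1"

// podWithImageVolume mounts the layers of an OCI artifact into the container
// at /models; per the API comments above, the mount is read-only and noexec.
func podWithImageVolume() *corev1.Pod {
	return &corev1.Pod{
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{{
				Name: "models",
				VolumeSource: corev1.VolumeSource{
					Image: &corev1.ImageVolumeSource{
						Reference:  "registry.example.com/models/llm:v1",
						PullPolicy: corev1.PullIfNotPresent,
					},
				},
			}},
			Containers: []corev1.Container{{
				Name:  "app",
				Image: "registry.example.com/app:latest",
				VolumeMounts: []corev1.VolumeMount{{
					Name:      "models",
					MountPath: "/models",
				}},
			}},
		},
	}
}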
type PersistentVolumeList struct { @@ -447,6 +464,7 @@ type PersistentVolumeList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeClaim is a user's request for and claim to a persistent volume type PersistentVolumeClaim struct { @@ -469,6 +487,7 @@ type PersistentVolumeClaim struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. type PersistentVolumeClaimList struct { @@ -557,7 +576,7 @@ type PersistentVolumeClaimSpec struct { // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource // exists. // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). // +featureGate=VolumeAttributesClass // +optional VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"` @@ -581,15 +600,29 @@ type TypedObjectReference struct { Namespace *string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` } -// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type +// PersistentVolumeClaimConditionType defines the condition of PV claim. +// Valid values are: +// - "Resizing", "FileSystemResizePending" +// +// If RecoverVolumeExpansionFailure feature gate is enabled, then following additional values can be expected: +// - "ControllerResizeError", "NodeResizeError" +// +// If VolumeAttributesClass feature gate is enabled, then following additional values can be expected: +// - "ModifyVolumeError", "ModifyingVolume" type PersistentVolumeClaimConditionType string +// These are valid conditions of PVC const ( // PersistentVolumeClaimResizing - a user trigger resize of pvc has been started PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" + // PersistentVolumeClaimControllerResizeError indicates an error while resizing volume for size in the controller + PersistentVolumeClaimControllerResizeError PersistentVolumeClaimConditionType = "ControllerResizeError" + // PersistentVolumeClaimNodeResizeError indicates an error while resizing volume for size in the node. + PersistentVolumeClaimNodeResizeError PersistentVolumeClaimConditionType = "NodeResizeError" + // Applying the target VolumeAttributesClass encountered an error PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError" // Volume is being modified @@ -606,18 +639,19 @@ const ( // State set when resize controller starts resizing the volume in control-plane. PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress" - // State set when resize has failed in resize controller with a terminal error. + // State set when resize has failed in resize controller with a terminal unrecoverable error. 
// Transient errors such as timeout should not set this status and should leave allocatedResourceStatus // unmodified, so as resize controller can resume the volume expansion. - PersistentVolumeClaimControllerResizeFailed ClaimResourceStatus = "ControllerResizeFailed" + PersistentVolumeClaimControllerResizeInfeasible ClaimResourceStatus = "ControllerResizeInfeasible" // State set when resize controller has finished resizing the volume but further resizing of volume // is needed on the node. PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending" // State set when kubelet starts resizing the volume. PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress" - // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed - PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed" + // State set when resizing has failed in kubelet with a terminal unrecoverable error. Transient errors + // shouldn't set this status + PersistentVolumeClaimNodeResizeInfeasible ClaimResourceStatus = "NodeResizeInfeasible" ) // +enum @@ -763,13 +797,13 @@ type PersistentVolumeClaimStatus struct { AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"` // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"` // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. // When this is unset, there is no ModifyVolume operation being attempted. - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"` @@ -943,16 +977,19 @@ type RBDVolumeSource struct { // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // secretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. @@ -988,16 +1025,19 @@ type RBDPersistentVolumeSource struct { // Default is rbd. 
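With ControllerResizeFailed and NodeResizeFailed renamed to the *Infeasible statuses above, a consumer watching expansion progress might detect a terminal error roughly as follows; the helper name is hypothetical and this is only a sketch of reading the existing status map.

package example

import corev1 "k8s.io/api/core/v1"

// resizeIsInfeasible reports whether any resource on the PVC has hit a terminal,
// unrecoverable resize error in either the external resize controller or the kubelet.
func resizeIsInfeasible(pvc *corev1.PersistentVolumeClaim) bool {
	for _, status := range pvc.Status.AllocatedResourceStatuses {
		switch status {
		case corev1.PersistentVolumeClaimControllerResizeInfeasible,
			corev1.PersistentVolumeClaimNodeResizeInfeasible:
			return true
		}
	}
	return false
}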
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // secretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. @@ -1426,6 +1466,7 @@ type ISCSIVolumeSource struct { // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -1473,6 +1514,7 @@ type ISCSIPersistentVolumeSource struct { // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -1613,17 +1655,21 @@ type AzureDiskVolumeSource struct { DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"` // cachingMode is the Host Caching mode: None, Read Only, Read Write. // +optional + // +default=ref(AzureDataDiskCachingReadWrite) CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"` // fsType is Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional + // +default="ext4" FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional + // +default=false ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` // kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + // +default=ref(AzureSharedBlobDisk) Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"` } @@ -1662,6 +1708,7 @@ type ScaleIOVolumeSource struct { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` // volumeName is the name of a volume already created in the ScaleIO system // that is associated with this volume source. @@ -1671,6 +1718,7 @@ type ScaleIOVolumeSource struct { // Ex. "ext4", "xfs", "ntfs". // Default is "xfs". 
// +optional + // +default="xfs" FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. @@ -1699,6 +1747,7 @@ type ScaleIOPersistentVolumeSource struct { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` // volumeName is the name of a volume already created in the ScaleIO system // that is associated with this volume source. @@ -1708,6 +1757,7 @@ type ScaleIOPersistentVolumeSource struct { // Ex. "ext4", "xfs", "ntfs". // Default is "xfs" // +optional + // +default="xfs" FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. @@ -1891,7 +1941,8 @@ type ClusterTrustBundleProjection struct { // Represents a projected volume source type ProjectedVolumeSource struct { - // sources is the list of volume projections + // sources is the list of volume projections. Each entry in this list + // handles one source. // +optional // +listType=atomic Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` @@ -1905,10 +1956,9 @@ type ProjectedVolumeSource struct { DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` } -// Projection that may be projected along with other supported volume types +// Projection that may be projected along with other supported volume types. +// Exactly one of these fields must be set. type VolumeProjection struct { - // all types below are the supported types for projection into the same volume - // secret information about the secret data to project // +optional Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` @@ -2631,6 +2681,13 @@ type ResourceClaim struct { // the Pod where this field is used. It makes that resource available // inside a container. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Request is the name chosen for a request in the referenced claim. + // If empty, everything from the claim is made available, otherwise + // only the result of this request. + // + // +optional + Request string `json:"request,omitempty" protobuf:"bytes,2,opt,name=request"` } const ( @@ -3030,6 +3087,93 @@ type ContainerStatus struct { // +listMapKey=mountPath // +featureGate=RecursiveReadOnlyMounts VolumeMounts []VolumeMountStatus `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,12,rep,name=volumeMounts"` + // User represents user identity information initially attached to the first process of the container + // +featureGate=SupplementalGroupsPolicy + // +optional + User *ContainerUser `json:"user,omitempty" protobuf:"bytes,13,opt,name=user,casttype=ContainerUser"` + // AllocatedResourcesStatus represents the status of various resources + // allocated for this Pod. 
+ // +featureGate=ResourceHealthStatus + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"` +} + +type ResourceStatus struct { + // Name of the resource. Must be unique within the pod and match one of the resources from the pod spec. + // +required + Name ResourceName `json:"name" protobuf:"bytes,1,opt,name=name"` + // List of unique Resources health. Each element in the list contains an unique resource ID and resource health. + // At a minimum, ResourceID must uniquely identify the Resource + // allocated to the Pod on the Node for the lifetime of a Pod. + // See ResourceID type for it's definition. + // +listType=map + // +listMapKey=resourceID + Resources []ResourceHealth `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` +} + +type ResourceHealthStatus string + +const ( + ResourceHealthStatusHealthy ResourceHealthStatus = "Healthy" + ResourceHealthStatusUnhealthy ResourceHealthStatus = "Unhealthy" + ResourceHealthStatusUnknown ResourceHealthStatus = "Unknown" +) + +// ResourceID is calculated based on the source of this resource health information. +// For DevicePlugin: +// +// deviceplugin:DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73 +// +// DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node. +// For DRA: +// +// dra://: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster. +type ResourceID string + +// ResourceHealth represents the health of a resource. It has the latest device health information. +// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP. +type ResourceHealth struct { + // ResourceID is the unique identifier of the resource. See the ResourceID type for more information. + ResourceID ResourceID `json:"resourceID" protobuf:"bytes,1,opt,name=resourceID"` + // Health of the resource. + // can be one of: + // - Healthy: operates as normal + // - Unhealthy: reported unhealthy. We consider this a temporary health issue + // since we do not have a mechanism today to distinguish + // temporary and permanent issues. + // - Unknown: The status cannot be determined. + // For example, Device Plugin got unregistered and hasn't been re-registered since. + // + // In future we may want to introduce the PermanentlyUnhealthy Status. + Health ResourceHealthStatus `json:"health,omitempty" protobuf:"bytes,2,name=health"` +} + +// ContainerUser represents user identity information +type ContainerUser struct { + // Linux holds user identity information initially attached to the first process of the containers in Linux. + // Note that the actual running identity can be changed if the process has enough privilege to do so. 
+ // +optional + Linux *LinuxContainerUser `json:"linux,omitempty" protobuf:"bytes,1,opt,name=linux,casttype=LinuxContainerUser"` + + // Windows holds user identity information initially attached to the first process of the containers in Windows + // This is just reserved for future use. + // Windows *WindowsContainerUser +} + +// LinuxContainerUser represents user identity information in Linux containers +type LinuxContainerUser struct { + // UID is the primary uid initially attached to the first process in the container + UID int64 `json:"uid" protobuf:"varint,1,name=uid"` + // GID is the primary gid initially attached to the first process in the container + GID int64 `json:"gid" protobuf:"varint,2,name=gid"` + // SupplementalGroups are the supplemental groups initially attached to the first process in the container + // +optional + // +listType=atomic + SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,3,rep,name=supplementalGroups"` } // PodPhase is a label for the condition of a pod at the current time. @@ -3426,7 +3570,8 @@ type PodAffinityTerm struct { // pod labels will be ignored. The default value is empty. // The same key is forbidden to exist in both matchLabelKeys and labelSelector. // Also, matchLabelKeys cannot be set when labelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // // +listType=atomic // +optional MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"` @@ -3438,7 +3583,8 @@ type PodAffinityTerm struct { // pod labels will be ignored. The default value is empty. // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. // Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // // +listType=atomic // +optional MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"` @@ -3667,9 +3813,11 @@ type PodSpec struct { // +optional AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"` - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. + // NodeName indicates in which node this pod is scheduled. + // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + // This field should not be used to express a desire for the pod to be scheduled on a specific node. + // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename // +optional NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` // Host networking requested for this pod. Use the host's network namespace. 
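The ContainerStatus additions above (User behind SupplementalGroupsPolicy, AllocatedResourcesStatus behind ResourceHealthStatus) are plain status fields once the kubelet populates them. A sketch of reading them from a Pod object; the output format and function name are arbitrary.

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// dumpContainerIdentityAndDeviceHealth prints the initial process identity and
// per-device health reported for each container, when those fields are set.
func dumpContainerIdentityAndDeviceHealth(pod *corev1.Pod) {
	for _, cs := range pod.Status.ContainerStatuses {
		if cs.User != nil && cs.User.Linux != nil {
			fmt.Printf("%s: uid=%d gid=%d supplementalGroups=%v\n",
				cs.Name, cs.User.Linux.UID, cs.User.Linux.GID, cs.User.Linux.SupplementalGroups)
		}
		for _, rs := range cs.AllocatedResourcesStatus {
			for _, health := range rs.Resources {
				fmt.Printf("%s: resource=%s device=%s health=%s\n",
					cs.Name, rs.Name, health.ResourceID, health.Health)
			}
		}
	}
}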
@@ -3826,6 +3974,7 @@ type PodSpec struct { // - spec.securityContext.runAsUser // - spec.securityContext.runAsGroup // - spec.securityContext.supplementalGroups + // - spec.securityContext.supplementalGroupsPolicy // - spec.containers[*].securityContext.appArmorProfile // - spec.containers[*].securityContext.seLinuxOptions // - spec.containers[*].securityContext.seccompProfile @@ -3883,7 +4032,10 @@ type PodSpec struct { ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"` } -// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. +// PodResourceClaim references exactly one ResourceClaim, either directly +// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim +// for the pod. +// // It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. // Containers that need access to the ResourceClaim reference it with this name. type PodResourceClaim struct { @@ -3891,18 +4043,17 @@ type PodResourceClaim struct { // This must be a DNS_LABEL. Name string `json:"name" protobuf:"bytes,1,name=name"` - // Source describes where to find the ResourceClaim. - Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"` -} + // Source is tombstoned since Kubernetes 1.31 where it got replaced with + // the inlined fields below. + // + // Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"` -// ClaimSource describes a reference to a ResourceClaim. -// -// Exactly one of these fields should be set. Consumers of this type must -// treat an empty object as if it has an unknown value. -type ClaimSource struct { // ResourceClaimName is the name of a ResourceClaim object in the same // namespace as this pod. - ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,1,opt,name=resourceClaimName"` + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,3,opt,name=resourceClaimName"` // ResourceClaimTemplateName is the name of a ResourceClaimTemplate // object in the same namespace as this pod. @@ -3916,7 +4067,10 @@ type ClaimSource struct { // This field is immutable and no changes will be made to the // corresponding ResourceClaim by the control plane after creating the // ResourceClaim. - ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimTemplateName"` + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,4,opt,name=resourceClaimTemplateName"` } // PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim @@ -3929,7 +4083,7 @@ type PodResourceClaimStatus struct { Name string `json:"name" protobuf:"bytes,1,name=name"` // ResourceClaimName is the name of the ResourceClaim that was - // generated for the Pod in the namespace of the Pod. It this is + // generated for the Pod in the namespace of the Pod. If this is // unset, then generating a ResourceClaim was not necessary. The // pod.spec.resourceClaims entry can be ignored in this case. // @@ -4137,6 +4291,23 @@ const ( FSGroupChangeAlways PodFSGroupChangePolicy = "Always" ) +// SupplementalGroupsPolicy defines how supplemental groups +// of the first container processes are calculated. 
+// +enum +type SupplementalGroupsPolicy string + +const ( + // SupplementalGroupsPolicyMerge means that the container's provided + // SupplementalGroups and FsGroup (specified in SecurityContext) will be + // merged with the primary user's groups as defined in the container image + // (in /etc/group). + SupplementalGroupsPolicyMerge SupplementalGroupsPolicy = "Merge" + // SupplementalGroupsPolicyStrict means that the container's provided + // SupplementalGroups and FsGroup (specified in SecurityContext) will be + // used instead of any groups defined in the container image. + SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict" +) + // PodSecurityContext holds pod-level security attributes and common container settings. // Some fields are also present in container.securityContext. Field values of // container.securityContext take precedence over field values of PodSecurityContext. @@ -4179,16 +4350,27 @@ type PodSecurityContext struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"` - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID, the fsGroup (if specified), and group memberships - // defined in the container image for the uid of the container process. If unspecified, - // no additional groups are added to any container. Note that group memberships - // defined in the container image for the uid of the container process are still effective, - // even if they are not included in this list. + // A list of groups applied to the first process run in each container, in + // addition to the container's primary GID and fsGroup (if specified). If + // the SupplementalGroupsPolicy feature is enabled, the + // supplementalGroupsPolicy field determines whether these are in addition + // to or instead of any group memberships defined in the container image. + // If unspecified, no additional groups are added, though group memberships + // defined in the container image may still be used, depending on the + // supplementalGroupsPolicy field. // Note that this field cannot be set when spec.os.name is windows. // +optional // +listType=atomic SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` + // Defines how supplemental groups of the first container processes are calculated. + // Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + // (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + // and the container runtime must implement support for this feature. + // Note that this field cannot be set when spec.os.name is windows. + // TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34 + // +featureGate=SupplementalGroupsPolicy + // +optional + SupplementalGroupsPolicy *SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty" protobuf:"bytes,12,opt,name=supplementalGroupsPolicy"` // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: @@ -4340,13 +4522,15 @@ type PodDNSConfigOption struct { // PodIP represents a single IP address allocated to the pod. 
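Setting the new supplementalGroupsPolicy field introduced above might look like the sketch below. It assumes the SupplementalGroupsPolicy feature gate is enabled and the container runtime supports it; the group IDs are placeholders.

package example

import corev1 "k8s.io/api/core/v1"

// strictSupplementalGroups makes the pod use only the groups listed in the spec,
// ignoring group memberships defined in the container image (/etc/group).
func strictSupplementalGroups(pod *corev1.Pod) {
	policy := corev1.SupplementalGroupsPolicyStrict
	if pod.Spec.SecurityContext == nil {
		pod.Spec.SecurityContext = &corev1.PodSecurityContext{}
	}
	pod.Spec.SecurityContext.SupplementalGroups = []int64{50000, 60000}
	pod.Spec.SecurityContext.SupplementalGroupsPolicy = &policy
}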
type PodIP struct { // IP is the IP address assigned to the pod - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // +required + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` } // HostIP represents a single IP address allocated to the host. type HostIP struct { // IP is the IP address assigned to the host - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // +required + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` } // EphemeralContainerCommon is a copy of all fields in Container to be inlined in @@ -4663,6 +4847,7 @@ type PodStatus struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded type PodStatusResult struct { @@ -4683,6 +4868,7 @@ type PodStatusResult struct { // +genclient // +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Pod is a collection of containers that can run on a host. This resource is created // by clients and scheduled onto hosts. @@ -4708,6 +4894,7 @@ type Pod struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodList is a list of Pods. type PodList struct { @@ -4737,6 +4924,7 @@ type PodTemplateSpec struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodTemplate describes a template for creating copies of a predefined pod. type PodTemplate struct { @@ -4753,6 +4941,7 @@ type PodTemplate struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodTemplateList is a list of PodTemplates. type PodTemplateList struct { @@ -4867,6 +5056,7 @@ type ReplicationControllerCondition struct { // +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ReplicationController represents the configuration of a replication controller. type ReplicationController struct { @@ -4893,6 +5083,7 @@ type ReplicationController struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ReplicationControllerList is a collection of replication controllers. type ReplicationControllerList struct { @@ -5437,6 +5628,7 @@ type ServicePort struct { // +genclient // +genclient:skipVerbs=deleteCollection // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Service is a named abstraction of software service (for example, mysql) consisting of local port // (for example 3306) that the proxy listens on, and the selector that determines which pods @@ -5468,6 +5660,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceList holds a list of services. 
type ServiceList struct { @@ -5484,6 +5677,7 @@ type ServiceList struct { // +genclient // +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceAccount binds together: // * a name, understood by users, and perhaps by peripheral systems, for an identity @@ -5523,6 +5717,7 @@ type ServiceAccount struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceAccountList is a list of ServiceAccount objects type ServiceAccountList struct { @@ -5539,6 +5734,7 @@ type ServiceAccountList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Endpoints is a collection of endpoints that implement the actual service. Example: // @@ -5660,6 +5856,7 @@ type EndpointPort struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // EndpointsList is a list of endpoints. type EndpointsList struct { @@ -5772,13 +5969,16 @@ type NodeDaemonEndpoints struct { KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` } -// NodeRuntimeHandlerFeatures is a set of runtime features. +// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler. type NodeRuntimeHandlerFeatures struct { // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts. // +featureGate=RecursiveReadOnlyMounts // +optional RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty" protobuf:"varint,1,opt,name=recursiveReadOnlyMounts"` - // Reserved: UserNamespaces *bool (varint 2, for consistency with CRI API) + // UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes. + // +featureGate=UserNamespacesSupport + // +optional + UserNamespaces *bool `json:"userNamespaces,omitempty" protobuf:"varint,2,opt,name=userNamespaces"` } // NodeRuntimeHandler is a set of runtime handler information. @@ -5792,6 +5992,15 @@ type NodeRuntimeHandler struct { Features *NodeRuntimeHandlerFeatures `json:"features,omitempty" protobuf:"bytes,2,opt,name=features"` } +// NodeFeatures describes the set of features implemented by the CRI implementation. +// The features contained in the NodeFeatures should depend only on the cri implementation +// independent of runtime handlers. +type NodeFeatures struct { + // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. + // +optional + SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty" protobuf:"varint,1,opt,name=supplementalGroupsPolicy"` +} + // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. type NodeSystemInfo struct { // MachineID reported by the node. For unique machine identification @@ -5812,7 +6021,7 @@ type NodeSystemInfo struct { ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"` // Kubelet Version reported by the node. KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"` - // KubeProxy Version reported by the node. + // Deprecated: KubeProxy Version reported by the node. 
KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"` // The Operating System reported by the node OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` @@ -5870,7 +6079,7 @@ type NodeConfigStatus struct { // NodeStatus is information about the current status of a node. type NodeStatus struct { // Capacity represents the total resources of a node. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // More info: https://kubernetes.io/docs/reference/node/node-status/#capacity // +optional Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` // Allocatable represents the resources of a node that are available for scheduling. @@ -5930,9 +6139,14 @@ type NodeStatus struct { Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"` // The available runtime handlers. // +featureGate=RecursiveReadOnlyMounts + // +featureGate=UserNamespacesSupport // +optional // +listType=atomic RuntimeHandlers []NodeRuntimeHandler `json:"runtimeHandlers,omitempty" protobuf:"bytes,12,rep,name=runtimeHandlers"` + // Features describes the set of features implemented by the CRI implementation. + // +featureGate=SupplementalGroupsPolicy + // +optional + Features *NodeFeatures `json:"features,omitempty" protobuf:"bytes,13,rep,name=features"` } type UniqueVolumeName string @@ -6128,6 +6342,7 @@ type ResourceList map[ResourceName]resource.Quantity // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). @@ -6152,6 +6367,7 @@ type Node struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // NodeList is the whole list of all Nodes which have been registered with master. type NodeList struct { @@ -6250,6 +6466,7 @@ type NamespaceCondition struct { // +genclient:nonNamespaced // +genclient:skipVerbs=deleteCollection // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Namespace provides a scope for Names. // Use of multiple namespaces is optional. @@ -6272,6 +6489,7 @@ type Namespace struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // NamespaceList is a list of Namespaces. type NamespaceList struct { @@ -6287,6 +6505,7 @@ type NamespaceList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Binding ties one object to another; for example, a pod is bound to a node by a scheduler. // Deprecated in 1.7, please use the bindings subresource of pods instead. @@ -6311,6 +6530,7 @@ type Preconditions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodLogOptions is the query options for a Pod's logs REST call. 
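The NodeFeatures and NodeRuntimeHandlerFeatures fields added above let clients probe node capabilities before depending on them. A possible helper, with hypothetical function names, for checking both the CRI-level feature and a specific runtime handler:

package example

import corev1 "k8s.io/api/core/v1"

// nodeSupportsStrictSupplementalGroups reports whether the node's CRI implementation
// advertises SupplementalGroupsPolicy (and ContainerUser) support via status.features.
func nodeSupportsStrictSupplementalGroups(node *corev1.Node) bool {
	f := node.Status.Features
	return f != nil && f.SupplementalGroupsPolicy != nil && *f.SupplementalGroupsPolicy
}

// runtimeHandlerSupportsUserNamespaces reports whether the named runtime handler
// advertises user-namespace support, the field that replaces the reserved slot above.
func runtimeHandlerSupportsUserNamespaces(node *corev1.Node, handler string) bool {
	for _, h := range node.Status.RuntimeHandlers {
		if h.Name == handler && h.Features != nil && h.Features.UserNamespaces != nil {
			return *h.Features.UserNamespaces
		}
	}
	return false
}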
type PodLogOptions struct { @@ -6363,6 +6583,7 @@ type PodLogOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.1 // PodAttachOptions is the query options to a Pod's remote attach call. // --- @@ -6401,6 +6622,7 @@ type PodAttachOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodExecOptions is the query options to a Pod's remote exec call. // --- @@ -6439,6 +6661,7 @@ type PodExecOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // PodPortForwardOptions is the query options to a Pod's port forward call // when using WebSockets. @@ -6458,6 +6681,7 @@ type PodPortForwardOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodProxyOptions is the query options to a Pod's proxy call. type PodProxyOptions struct { @@ -6470,6 +6694,7 @@ type PodProxyOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // NodeProxyOptions is the query options to a Node's proxy call. type NodeProxyOptions struct { @@ -6482,6 +6707,7 @@ type NodeProxyOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ServiceProxyOptions is the query options to a Service's proxy call. type ServiceProxyOptions struct { @@ -6584,6 +6810,7 @@ type TypedLocalObjectReference struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // SerializedReference is a reference to serialized object. type SerializedReference struct { @@ -6613,6 +6840,7 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Event is a report of an event somewhere in the cluster. Events // have a limited retention time and triggers and messages may evolve @@ -6697,6 +6925,7 @@ type EventSeries struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // EventList is a list of events. type EventList struct { @@ -6711,6 +6940,7 @@ type EventList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // List holds a list of objects, which may not be known by the server. type List metav1.List @@ -6758,6 +6988,7 @@ type LimitRangeSpec struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // LimitRange sets resource usage limits for each kind of resource in a Namespace. type LimitRange struct { @@ -6774,6 +7005,7 @@ type LimitRange struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // LimitRangeList is a list of LimitRange items. 
type LimitRangeList struct { @@ -6822,6 +7054,8 @@ const ( ResourceLimitsMemory ResourceName = "limits.memory" // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" + // resource.k8s.io devices requested with a certain DeviceClass, number + ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices" ) // The following identify resource prefix for Kubernetes object types @@ -6922,6 +7156,7 @@ type ResourceQuotaStatus struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ResourceQuota sets aggregate quota restrictions enforced per namespace type ResourceQuota struct { @@ -6943,6 +7178,7 @@ type ResourceQuota struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ResourceQuotaList is a list of ResourceQuota items. type ResourceQuotaList struct { @@ -6959,6 +7195,7 @@ type ResourceQuotaList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Secret holds secret data of a certain type. The total bytes of the values in // the Data field must be less than MaxSecretSize bytes. @@ -7085,6 +7322,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // SecretList is a list of Secret. type SecretList struct { @@ -7101,6 +7339,7 @@ type SecretList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ConfigMap holds configuration data for pods to consume. type ConfigMap struct { @@ -7137,6 +7376,7 @@ type ConfigMap struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ConfigMapList is a resource containing a list of ConfigMap objects. type ConfigMapList struct { @@ -7179,6 +7419,7 @@ type ComponentCondition struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ComponentStatus (and ComponentStatusList) holds the cluster validation info. // Deprecated: This API is deprecated in v1.19+ @@ -7199,6 +7440,7 @@ type ComponentStatus struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Status of all the conditions for the component as a list of ComponentStatus objects. // Deprecated: This API is deprecated in v1.19+ @@ -7332,7 +7574,7 @@ type SecurityContext struct { // +optional AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"` // procMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for + // The default value is Default which uses the container runtime defaults for // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. // Note that this field cannot be set when spec.os.name is windows. 
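The procMount comment change above only rewords the default ("Default", i.e. the runtime's masked paths). For contrast, opting out of masking still requires the ProcMountType feature gate and, per the comment, cannot be combined with spec.os.name: windows; a sketch assuming the UnmaskedProcMount constant from this package:

package example

import corev1 "k8s.io/api/core/v1"

// unmaskedProcMount asks the runtime not to mask or mark read-only the usual /proc paths.
// Omitting the field keeps the "Default" behavior described above.
func unmaskedProcMount(c *corev1.Container) {
	pm := corev1.UnmaskedProcMount
	if c.SecurityContext == nil {
		c.SecurityContext = &corev1.SecurityContext{}
	}
	c.SecurityContext.ProcMount = &pm
}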
@@ -7410,6 +7652,7 @@ type WindowsSecurityContextOptions struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // RangeAllocation is not a public type. type RangeAllocation struct { @@ -7519,3 +7762,23 @@ const ( // the destination set to the node's IP and port or the pod's IP and port. LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy" ) + +// ImageVolumeSource represents a image volume resource. +type ImageVolumeSource struct { + // Required: Image or artifact reference to be used. + // Behaves in the same way as pod.spec.containers[*].image. + // Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Reference string `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` + + // Policy for pulling OCI objects. Possible values are: + // Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // +optional + PullPolicy PullPolicy `json:"pullPolicy,omitempty" protobuf:"bytes,2,opt,name=pullPolicy,casttype=PullPolicy"` +} diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index c54f2a2fe5..950806ef8e 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -219,16 +219,6 @@ func (CinderVolumeSource) SwaggerDoc() map[string]string { return map_CinderVolumeSource } -var map_ClaimSource = map[string]string{ - "": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value.", - "resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.", - "resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.", -} - -func (ClaimSource) SwaggerDoc() map[string]string { - return map_ClaimSource -} - var map_ClientIPConfig = map[string]string{ "": "ClientIPConfig represents the configurations of Client IP based session affinity.", "timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. 
The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).", @@ -469,25 +459,36 @@ func (ContainerStateWaiting) SwaggerDoc() map[string]string { } var map_ContainerStatus = map[string]string{ - "": "ContainerStatus contains details for the current status of this container.", - "name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", - "state": "State holds details about the container's current condition.", - "lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.", - "ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", - "restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.", - "image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", - "imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", - "containerID": "ContainerID is the ID of the container in the format '://'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", - "started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.", - "allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", - "resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", - "volumeMounts": "Status of volume mounts.", + "": "ContainerStatus contains details for the current status of this container.", + "name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. 
Cannot be updated.", + "state": "State holds details about the container's current condition.", + "lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.", + "ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", + "restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.", + "image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", + "imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", + "containerID": "ContainerID is the ID of the container in the format '://'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", + "started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.", + "allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", + "resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", + "volumeMounts": "Status of volume mounts.", + "user": "User represents user identity information initially attached to the first process of the container", + "allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.", } func (ContainerStatus) SwaggerDoc() map[string]string { return map_ContainerStatus } +var map_ContainerUser = map[string]string{ + "": "ContainerUser represents user identity information", + "linux": "Linux holds user identity information initially attached to the first process of the containers in Linux. 
Note that the actual running identity can be changed if the process has enough privilege to do so.", +} + +func (ContainerUser) SwaggerDoc() map[string]string { + return map_ContainerUser +} + var map_DaemonEndpoint = map[string]string{ "": "DaemonEndpoint contains information about a single Daemon endpoint.", "Port": "Port number of the given endpoint.", @@ -933,6 +934,16 @@ func (ISCSIVolumeSource) SwaggerDoc() map[string]string { return map_ISCSIVolumeSource } +var map_ImageVolumeSource = map[string]string{ + "": "ImageVolumeSource represents a image volume resource.", + "reference": "Required: Image or artifact reference to be used. Behaves in the same way as pod.spec.containers[*].image. Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "pullPolicy": "Policy for pulling OCI objects. Possible values are: Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.", +} + +func (ImageVolumeSource) SwaggerDoc() map[string]string { + return map_ImageVolumeSource +} + var map_KeyToPath = map[string]string{ "": "Maps a string key to a path within a volume.", "key": "key is the key to project.", @@ -1009,6 +1020,17 @@ func (LimitRangeSpec) SwaggerDoc() map[string]string { return map_LimitRangeSpec } +var map_LinuxContainerUser = map[string]string{ + "": "LinuxContainerUser represents user identity information in Linux containers", + "uid": "UID is the primary uid initially attached to the first process in the container", + "gid": "GID is the primary gid initially attached to the first process in the container", + "supplementalGroups": "SupplementalGroups are the supplemental groups initially attached to the first process in the container", +} + +func (LinuxContainerUser) SwaggerDoc() map[string]string { + return map_LinuxContainerUser +} + var map_LoadBalancerIngress = map[string]string{ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", @@ -1195,6 +1217,15 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { return map_NodeDaemonEndpoints } +var map_NodeFeatures = map[string]string{ + "": "NodeFeatures describes the set of features implemented by the CRI implementation. 
The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.", + "supplementalGroupsPolicy": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.", +} + +func (NodeFeatures) SwaggerDoc() map[string]string { + return map_NodeFeatures +} + var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -1225,8 +1256,9 @@ func (NodeRuntimeHandler) SwaggerDoc() map[string]string { } var map_NodeRuntimeHandlerFeatures = map[string]string{ - "": "NodeRuntimeHandlerFeatures is a set of runtime features.", + "": "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.", "recursiveReadOnlyMounts": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.", + "userNamespaces": "UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.", } func (NodeRuntimeHandlerFeatures) SwaggerDoc() map[string]string { @@ -1280,7 +1312,7 @@ func (NodeSpec) SwaggerDoc() map[string]string { var map_NodeStatus = map[string]string{ "": "NodeStatus is information about the current status of a node.", - "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity", "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", @@ -1292,6 +1324,7 @@ var map_NodeStatus = map[string]string{ "volumesAttached": "List of volumes that are attached to the node.", "config": "Status of the config assigned to the node via the dynamic Kubelet config feature.", "runtimeHandlers": "The available runtime handlers.", + "features": "Features describes the set of features implemented by the CRI implementation.", } func (NodeStatus) SwaggerDoc() map[string]string { @@ -1307,7 +1340,7 @@ var map_NodeSystemInfo = map[string]string{ "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).", "kubeletVersion": "Kubelet Version reported by the node.", - "kubeProxyVersion": "KubeProxy Version reported by the node.", + "kubeProxyVersion": "Deprecated: KubeProxy Version reported by the node.", "operatingSystem": "The Operating System reported by the node", "architecture": "The Architecture reported by the node", } @@ -1395,7 +1428,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "volumeMode": "volumeMode defines what type of volume is required by the claim. 
Value of Filesystem is implied when not included in claim spec.", "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", - "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.", + "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1410,8 +1443,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{ "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.", "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. 
Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", - "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.", - "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", + "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1488,7 +1521,7 @@ var map_PersistentVolumeSpec = map[string]string{ "mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.", "nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", - "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1500,7 +1533,7 @@ var map_PersistentVolumeStatus = map[string]string{ "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", "message": "message is a human-readable message indicating details about why the volume is in this state.", "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", - "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).", + "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions.", } func (PersistentVolumeStatus) SwaggerDoc() map[string]string { @@ -1544,8 +1577,8 @@ var map_PodAffinityTerm = map[string]string{ "namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. 
Empty topologyKey is not allowed.", "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.", - "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", - "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", + "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -1696,9 +1729,10 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { } var map_PodResourceClaim = map[string]string{ - "": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. 
It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", - "name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.", - "source": "Source describes where to find the ResourceClaim.", + "": "PodResourceClaim references exactly one ResourceClaim, either directly or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim for the pod.\n\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", + "name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.", + "resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", + "resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", } func (PodResourceClaim) SwaggerDoc() map[string]string { @@ -1708,7 +1742,7 @@ func (PodResourceClaim) SwaggerDoc() map[string]string { var map_PodResourceClaimStatus = map[string]string{ "": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.", "name": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.", - "resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. It this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", + "resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", } func (PodResourceClaimStatus) SwaggerDoc() map[string]string { @@ -1725,18 +1759,19 @@ func (PodSchedulingGate) SwaggerDoc() map[string]string { } var map_PodSecurityContext = map[string]string{ - "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", - "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.", - "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", - "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", - "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", - "seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", - "appArmorProfile": "appArmorProfile is the AppArmor options to use by the containers in this pod. 
Note that this field cannot be set when spec.os.name is windows.", + "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified). If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows.", + "supplementalGroupsPolicy": "Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. Note that this field cannot be set when spec.os.name is windows.", + "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw ", + "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", + "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", + "seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", + "appArmorProfile": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", } func (PodSecurityContext) SwaggerDoc() map[string]string { @@ -1766,7 +1801,7 @@ var map_PodSpec = map[string]string{ "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", "serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", - "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "nodeName": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename", "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", "hostPID": "Use the host's pid namespace. Optional: Default to false.", "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", @@ -1789,7 +1824,7 @@ var map_PodSpec = map[string]string{ "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. 
Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", - "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", + "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", "hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. 
Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.", "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", @@ -1943,7 +1978,7 @@ func (ProbeHandler) SwaggerDoc() map[string]string { var map_ProjectedVolumeSource = map[string]string{ "": "Represents a projected volume source", - "sources": "sources is the list of volume projections", + "sources": "sources is the list of volume projections. Each entry in this list handles one source.", "defaultMode": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", } @@ -2069,8 +2104,9 @@ func (ReplicationControllerStatus) SwaggerDoc() map[string]string { } var map_ResourceClaim = map[string]string{ - "": "ResourceClaim references one entry in PodSpec.ResourceClaims.", - "name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", + "": "ResourceClaim references one entry in PodSpec.ResourceClaims.", + "name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", + "request": "Request is the name chosen for a request in the referenced claim. If empty, everything from the claim is made available, otherwise only the result of this request.", } func (ResourceClaim) SwaggerDoc() map[string]string { @@ -2088,6 +2124,16 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string { return map_ResourceFieldSelector } +var map_ResourceHealth = map[string]string{ + "": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.", + "resourceID": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.", + "health": "Health of the resource. can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. 
We consider this a temporary health issue\n since we do not have a mechanism today to distinguish\n temporary and permanent issues.\n - Unknown: The status cannot be determined.\n For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.", +} + +func (ResourceHealth) SwaggerDoc() map[string]string { + return map_ResourceHealth +} + var map_ResourceQuota = map[string]string{ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -2141,6 +2187,15 @@ func (ResourceRequirements) SwaggerDoc() map[string]string { return map_ResourceRequirements } +var map_ResourceStatus = map[string]string{ + "name": "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.", + "resources": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. See ResourceID type for it's definition.", +} + +func (ResourceStatus) SwaggerDoc() map[string]string { + return map_ResourceStatus +} + var map_SELinuxOptions = map[string]string{ "": "SELinuxOptions are the labels to be applied to the container", "user": "User is a SELinux user label that applies to the container.", @@ -2304,7 +2359,7 @@ var map_SecurityContext = map[string]string{ "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.", "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.", - "procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", + "procMount": "procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", "seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. 
Note that this field cannot be set when spec.os.name is windows.", "appArmorProfile": "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.", } @@ -2639,7 +2694,7 @@ func (VolumeNodeAffinity) SwaggerDoc() map[string]string { } var map_VolumeProjection = map[string]string{ - "": "Projection that may be projected along with other supported volume types", + "": "Projection that may be projected along with other supported volume types. Exactly one of these fields must be set.", "secret": "secret information about the secret data to project", "downwardAPI": "downwardAPI information about the downwardAPI data to project", "configMap": "configMap information about the configMap data to project", @@ -2692,6 +2747,7 @@ var map_VolumeSource = map[string]string{ "storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", "csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", "ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + "image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. 
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.", } func (VolumeSource) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 08e927848e..3d23f7f620 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -440,32 +440,6 @@ func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClaimSource) DeepCopyInto(out *ClaimSource) { - *out = *in - if in.ResourceClaimName != nil { - in, out := &in.ResourceClaimName, &out.ResourceClaimName - *out = new(string) - **out = **in - } - if in.ResourceClaimTemplateName != nil { - in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource. -func (in *ClaimSource) DeepCopy() *ClaimSource { - if in == nil { - return nil - } - out := new(ClaimSource) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { *out = *in @@ -1069,6 +1043,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(ContainerUser) + (*in).DeepCopyInto(*out) + } + if in.AllocatedResourcesStatus != nil { + in, out := &in.AllocatedResourcesStatus, &out.AllocatedResourcesStatus + *out = make([]ResourceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1082,6 +1068,27 @@ func (in *ContainerStatus) DeepCopy() *ContainerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerUser) DeepCopyInto(out *ContainerUser) { + *out = *in + if in.Linux != nil { + in, out := &in.Linux, &out.Linux + *out = new(LinuxContainerUser) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerUser. +func (in *ContainerUser) DeepCopy() *ContainerUser { + if in == nil { + return nil + } + out := new(ContainerUser) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { *out = *in @@ -2044,6 +2051,22 @@ func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageVolumeSource) DeepCopyInto(out *ImageVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageVolumeSource. 
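Editor's note: the ContainerStatus hunks above add a User field (ContainerUser/LinuxContainerUser) and an AllocatedResourcesStatus list (ResourceStatus/ResourceHealth, KEP-4680) to the vendored core/v1 types, matching the swagger docs earlier in this file. Below is a minimal sketch of how a client might read these new status fields; the pod value, the nil-checks, and the print format are illustrative only and not part of this diff.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// inspectContainerStatus prints the user identity and per-resource device
// health that the updated core/v1 ContainerStatus can now carry.
func inspectContainerStatus(pod *corev1.Pod) {
	for _, cs := range pod.Status.ContainerStatuses {
		// User is only populated when the kubelet reports it; both
		// pointers may be nil, so guard before dereferencing.
		if cs.User != nil && cs.User.Linux != nil {
			fmt.Printf("%s runs as uid=%d gid=%d groups=%v\n",
				cs.Name, cs.User.Linux.UID, cs.User.Linux.GID, cs.User.Linux.SupplementalGroups)
		}
		// AllocatedResourcesStatus lists health per allocated resource.
		for _, rs := range cs.AllocatedResourcesStatus {
			for _, rh := range rs.Resources {
				fmt.Printf("%s: resource %q device %q health=%s\n",
					cs.Name, rs.Name, rh.ResourceID, rh.Health)
			}
		}
	}
}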
+func (in *ImageVolumeSource) DeepCopy() *ImageVolumeSource { + if in == nil { + return nil + } + out := new(ImageVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { *out = *in @@ -2261,6 +2284,27 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxContainerUser) DeepCopyInto(out *LinuxContainerUser) { + *out = *in + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxContainerUser. +func (in *LinuxContainerUser) DeepCopy() *LinuxContainerUser { + if in == nil { + return nil + } + out := new(LinuxContainerUser) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *List) DeepCopyInto(out *List) { *out = *in @@ -2695,6 +2739,27 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) { + *out = *in + if in.SupplementalGroupsPolicy != nil { + in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures. +func (in *NodeFeatures) DeepCopy() *NodeFeatures { + if in == nil { + return nil + } + out := new(NodeFeatures) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeList) DeepCopyInto(out *NodeList) { *out = *in @@ -2782,6 +2847,11 @@ func (in *NodeRuntimeHandlerFeatures) DeepCopyInto(out *NodeRuntimeHandlerFeatur *out = new(bool) **out = **in } + if in.UserNamespaces != nil { + in, out := &in.UserNamespaces, &out.UserNamespaces + *out = new(bool) + **out = **in + } return } @@ -2962,6 +3032,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = new(NodeFeatures) + (*in).DeepCopyInto(*out) + } return } @@ -3971,7 +4046,16 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
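Editor's note: the new ImageVolumeSource type above (and the matching `image` entry added to VolumeSource later in this file) backs the image volume feature described in the swagger hunk earlier. A minimal sketch of declaring such a volume with the vendored core/v1 types follows; the image reference, volume name, and mount path are placeholders, and the sketch assumes the Reference/PullPolicy field names carried by the vendored type.

package main

import corev1 "k8s.io/api/core/v1"

// imageVolume builds a pod volume backed by an OCI image using the
// ImageVolumeSource added in this vendor bump. Reference and the mount
// path below are placeholders for illustration only.
func imageVolume() (corev1.Volume, corev1.VolumeMount) {
	vol := corev1.Volume{
		Name: "models",
		VolumeSource: corev1.VolumeSource{
			Image: &corev1.ImageVolumeSource{
				Reference:  "registry.example.com/models/resnet:v1", // hypothetical artifact
				PullPolicy: corev1.PullIfNotPresent,
			},
		},
	}
	// Per the field documentation above, the content is mounted read-only
	// and subPath mounts are not supported for this volume type.
	mount := corev1.VolumeMount{
		Name:      "models",
		MountPath: "/models",
		ReadOnly:  true,
	}
	return vol, mount
}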
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) { *out = *in - in.Source.DeepCopyInto(&out.Source) + if in.ResourceClaimName != nil { + in, out := &in.ResourceClaimName, &out.ResourceClaimName + *out = new(string) + **out = **in + } + if in.ResourceClaimTemplateName != nil { + in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName + *out = new(string) + **out = **in + } return } @@ -4055,6 +4139,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = make([]int64, len(*in)) copy(*out, *in) } + if in.SupplementalGroupsPolicy != nil { + in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy + *out = new(SupplementalGroupsPolicy) + **out = **in + } if in.FSGroup != nil { in, out := &in.FSGroup, &out.FSGroup *out = new(int64) @@ -4900,6 +4989,22 @@ func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceHealth) DeepCopyInto(out *ResourceHealth) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealth. +func (in *ResourceHealth) DeepCopy() *ResourceHealth { + if in == nil { + return nil + } + out := new(ResourceHealth) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in ResourceList) DeepCopyInto(out *ResourceList) { { @@ -5081,6 +5186,27 @@ func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceHealth, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus. +func (in *ResourceStatus) DeepCopy() *ResourceStatus { + if in == nil { + return nil + } + out := new(ResourceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { *out = *in @@ -6426,6 +6552,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = new(EphemeralVolumeSource) (*in).DeepCopyInto(*out) } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ImageVolumeSource) + **out = **in + } return } diff --git a/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..6710a96d1c --- /dev/null +++ b/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,274 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
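Editor's note: together with the ClaimSource removal earlier in this file, the PodResourceClaim hunk above flattens the claim reference into ResourceClaimName/ResourceClaimTemplateName, and the swagger hunk earlier adds a Request field to ResourceClaim. A minimal sketch of the updated shape, assuming the DynamicResourceAllocation feature gate is enabled and the pod has at least one container; the claim and request names are placeholders.

package main

import corev1 "k8s.io/api/core/v1"

// gpuClaim wires a pre-created ResourceClaim into a pod and lets its first
// container consume one named request from it, using the flattened
// PodResourceClaim fields and the new ResourceClaim.Request field.
func gpuClaim(pod *corev1.Pod) {
	claimName := "shared-gpu" // placeholder ResourceClaim name
	pod.Spec.ResourceClaims = []corev1.PodResourceClaim{{
		Name:              "gpu",
		ResourceClaimName: &claimName, // previously ClaimSource.ResourceClaimName
	}}
	pod.Spec.Containers[0].Resources.Claims = []corev1.ResourceClaim{{
		Name:    "gpu",
		Request: "one-device", // optional: consume only this request from the claim
	}}
}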
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Binding) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ConfigMap) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ConfigMapList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Endpoints) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EndpointsList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Event) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EventList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LimitRange) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LimitRangeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
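Editor's note: this new zz_generated.prerelease-lifecycle.go file, like the sibling files added for discovery, events and flowcontrol further down, only adds introspection helpers and changes nothing at runtime. A minimal sketch of how calling code could use one of them, purely illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Each generated helper reports the release that introduced the type,
	// which callers can compare against a cluster version if they wish.
	major, minor := (&corev1.ConfigMap{}).APILifecycleIntroduced()
	fmt.Printf("core/v1 ConfigMap was introduced in %d.%d\n", major, minor) // 1.2 per the file above
}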
+func (in *List) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Namespace) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NamespaceList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Node) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NodeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NodeProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolume) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeClaim) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeClaimList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *Pod) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodAttachOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 1 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodExecOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodLogOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodPortForwardOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodStatusResult) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodTemplate) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodTemplateList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *RangeAllocation) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicationController) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicationControllerList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceQuota) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceQuotaList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Secret) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SecretList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SerializedReference) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Service) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceAccount) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *ServiceAccountList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/vendor/k8s.io/api/discovery/v1/doc.go b/vendor/k8s.io/api/discovery/v1/doc.go index 96ae531ce7..01913669ff 100644 --- a/vendor/k8s.io/api/discovery/v1/doc.go +++ b/vendor/k8s.io/api/discovery/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=discovery.k8s.io package v1 // import "k8s.io/api/discovery/v1" diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto index 6d234017b7..8ddf0dc5d3 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1/generated.proto @@ -54,7 +54,7 @@ message Endpoint { // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional - optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + optional .k8s.io.api.core.v1.ObjectReference targetRef = 4; // deprecatedTopology contains topology information part of the v1beta1 // API. This field is deprecated, and will be removed when the v1beta1 @@ -161,7 +161,7 @@ message EndpointPort { message EndpointSlice { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is @@ -191,7 +191,7 @@ message EndpointSlice { message EndpointSliceList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of endpoint slices repeated EndpointSlice items = 2; diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index 7ebb07ca35..d6a9d0fced 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -23,6 +23,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // EndpointSlice represents a subset of the endpoints that implement a service. 
// For a given service there may be multiple EndpointSlice objects, selected by @@ -206,6 +207,7 @@ type EndpointPort struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { diff --git a/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..362867c5b9 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EndpointSlice) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EndpointSliceList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index ec555a40b3..55828dd97d 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -54,7 +54,7 @@ message Endpoint { // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional - optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + optional .k8s.io.api.core.v1.ObjectReference targetRef = 4; // topology contains arbitrary topology information associated with the // endpoint. These key/value pairs must conform with the label format. @@ -153,7 +153,7 @@ message EndpointPort { message EndpointSlice { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is @@ -183,7 +183,7 @@ message EndpointSlice { message EndpointSliceList { // Standard list metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of endpoint slices repeated EndpointSlice items = 2; diff --git a/vendor/k8s.io/api/events/v1/doc.go b/vendor/k8s.io/api/events/v1/doc.go index 6e320e0634..5fe700ffcf 100644 --- a/vendor/k8s.io/api/events/v1/doc.go +++ b/vendor/k8s.io/api/events/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=events.k8s.io package v1 // import "k8s.io/api/events/v1" diff --git a/vendor/k8s.io/api/events/v1/generated.proto b/vendor/k8s.io/api/events/v1/generated.proto index cfa16b021b..6c7e4cca19 100644 --- a/vendor/k8s.io/api/events/v1/generated.proto +++ b/vendor/k8s.io/api/events/v1/generated.proto @@ -39,10 +39,10 @@ message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; // series is data about the Event series this event represents or nil if it's a singleton Event. // +optional @@ -68,12 +68,12 @@ message Event { // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because // it acts on some changes in a ReplicaSet object. // +optional - optional k8s.io.api.core.v1.ObjectReference regarding = 8; + optional .k8s.io.api.core.v1.ObjectReference regarding = 8; // related is the optional secondary object for more complex actions. E.g. when regarding object triggers // a creation or deletion of related object. // +optional - optional k8s.io.api.core.v1.ObjectReference related = 9; + optional .k8s.io.api.core.v1.ObjectReference related = 9; // note is a human-readable description of the status of this operation. // Maximal length of the note is 1kB, but libraries should be prepared to @@ -88,15 +88,15 @@ message Event { // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + optional .k8s.io.api.core.v1.EventSource deprecatedSource = 12; // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional @@ -108,7 +108,7 @@ message EventList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Event items = 2; @@ -123,6 +123,6 @@ message EventSeries { optional int32 count = 1; // lastObservedTime is the time when last Event from the series was seen before last heartbeat. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; } diff --git a/vendor/k8s.io/api/events/v1/types.go b/vendor/k8s.io/api/events/v1/types.go index e01a2b21e7..86b12eee15 100644 --- a/vendor/k8s.io/api/events/v1/types.go +++ b/vendor/k8s.io/api/events/v1/types.go @@ -23,6 +23,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. // Events have a limited retention time and triggers and messages may evolve @@ -109,6 +110,7 @@ type EventSeries struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // EventList is a list of Event objects. type EventList struct { diff --git a/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..5217d1ac69 --- /dev/null +++ b/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Event) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EventList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto index de60bdc3e8..fbdb309701 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -39,10 +39,10 @@ message Event { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; // series is data about the Event series this event represents or nil if it's a singleton Event. // +optional @@ -72,12 +72,12 @@ message Event { // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because // it acts on some changes in a ReplicaSet object. // +optional - optional k8s.io.api.core.v1.ObjectReference regarding = 8; + optional .k8s.io.api.core.v1.ObjectReference regarding = 8; // related is the optional secondary object for more complex actions. E.g. when regarding object triggers // a creation or deletion of related object. // +optional - optional k8s.io.api.core.v1.ObjectReference related = 9; + optional .k8s.io.api.core.v1.ObjectReference related = 9; // note is a human-readable description of the status of this operation. // Maximal length of the note is 1kB, but libraries should be prepared to @@ -92,15 +92,15 @@ message Event { // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + optional .k8s.io.api.core.v1.EventSource deprecatedSource = 12; // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional @@ -112,7 +112,7 @@ message EventList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Event items = 2; @@ -125,6 +125,6 @@ message EventSeries { optional int32 count = 1; // lastObservedTime is the time when last Event from the series was seen before last heartbeat. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; } diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 60effc8f71..9bbcaa0e26 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -37,7 +37,7 @@ message DaemonSet { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -63,7 +63,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -79,7 +79,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. repeated DaemonSet items = 2; @@ -92,14 +92,14 @@ message DaemonSetSpec { // If empty, defaulted to labels on Pod template. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -205,7 +205,7 @@ message DaemonSetUpdateStrategy { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -225,10 +225,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -241,7 +241,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -271,10 +271,10 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -440,7 +440,7 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -461,13 +461,13 @@ message IngressBackend { // Specifies the port of the referenced service. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; // Resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressList is a collection of Ingress. @@ -475,7 +475,7 @@ message IngressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Ingress. repeated Ingress items = 2; @@ -651,7 +651,7 @@ message NetworkPolicy { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior for this NetworkPolicy. // +optional @@ -710,7 +710,7 @@ message NetworkPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of schema objects. repeated NetworkPolicy items = 2; @@ -725,7 +725,7 @@ message NetworkPolicyPeer { // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // Selects Namespaces using cluster-scoped labels. This field follows standard label // selector semantics; if present but empty, it selects all namespaces. @@ -734,7 +734,7 @@ message NetworkPolicyPeer { // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; // IPBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. @@ -754,7 +754,7 @@ message NetworkPolicyPort { // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; // If set, indicates that the range of ports from port to endPort, inclusive, // should be allowed by the policy. This field cannot be defined if the port field @@ -771,7 +771,7 @@ message NetworkPolicySpec { // same set of pods. In this case, the ingress rules for each are combined additively. // This field is NOT optional and follows standard label selector semantics. // An empty podSelector matches all pods in this namespace. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // List of ingress rules to be applied to the selected pods. // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod @@ -818,7 +818,7 @@ message ReplicaSet { // be the same as the Pod(s) that the ReplicaSet manages. // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -844,7 +844,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -860,7 +860,7 @@ message ReplicaSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -887,13 +887,13 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. 
@@ -951,7 +951,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. @@ -973,7 +973,7 @@ message RollingUpdateDaemonSet { // cause evictions during disruption. // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -989,7 +989,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -1003,14 +1003,14 @@ message RollingUpdateDeployment { // new RC can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index cc2deadac0..09f58692f4 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -775,7 +775,7 @@ type IngressRule struct { // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the diff --git a/vendor/k8s.io/api/flowcontrol/v1/doc.go b/vendor/k8s.io/api/flowcontrol/v1/doc.go index 1bc51d4066..c9e7db1589 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/doc.go +++ b/vendor/k8s.io/api/flowcontrol/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=flowcontrol.apiserver.k8s.io diff --git a/vendor/k8s.io/api/flowcontrol/v1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1/generated.proto index a5c6f4fc4f..33a135889e 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -302,7 +302,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -327,7 +327,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -341,7 +341,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. 
repeated PriorityLevelConfiguration items = 2; diff --git a/vendor/k8s.io/api/flowcontrol/v1/types.go b/vendor/k8s.io/api/flowcontrol/v1/types.go index e62d23280e..ad72bcee22 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1/types.go @@ -106,6 +106,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -126,6 +127,7 @@ type FlowSchema struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -381,6 +383,7 @@ type FlowSchemaConditionType string // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -400,6 +403,7 @@ type PriorityLevelConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..fbab9868c7 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto index 04b54820c7..61ed3833ae 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -295,7 +295,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -320,7 +320,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -334,7 +334,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto index a832114afe..d6073fc925 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -295,7 +295,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -320,7 +320,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -334,7 +334,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. 
repeated PriorityLevelConfiguration items = 2; diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto index eda0f7829e..c6504d4353 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -297,7 +297,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -322,7 +322,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -336,7 +336,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. 
repeated PriorityLevelConfiguration items = 2; diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto index fd55972f20..5ea5c0ec8e 100644 --- a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto @@ -33,7 +33,7 @@ message ImageReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the pod being evaluated optional ImageReviewSpec spec = 2; diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index d3ffd5ed17..1d13e7bab3 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=networking.k8s.io package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index 22a9085a54..c72fdc8f37 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -96,7 +96,7 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -121,7 +121,7 @@ message IngressBackend { // service.Port must not be specified. // This is a mutually exclusive setting with "Service". // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressClass represents the class of the Ingress, referenced by the Ingress @@ -133,7 +133,7 @@ message IngressClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -145,7 +145,7 @@ message IngressClass { message IngressClassList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of IngressClasses. repeated IngressClass items = 2; @@ -200,7 +200,7 @@ message IngressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of Ingress. 
repeated Ingress items = 2; @@ -381,7 +381,7 @@ message NetworkPolicy { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional @@ -438,7 +438,7 @@ message NetworkPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated NetworkPolicy items = 2; @@ -454,7 +454,7 @@ message NetworkPolicyPeer { // the pods matching podSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // namespaceSelector selects namespaces using cluster-scoped labels. This field follows // standard label selector semantics; if present but empty, it selects all namespaces. @@ -463,7 +463,7 @@ message NetworkPolicyPeer { // the pods matching podSelector in the namespaces selected by namespaceSelector. // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. @@ -483,7 +483,7 @@ message NetworkPolicyPort { // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field @@ -501,7 +501,7 @@ message NetworkPolicySpec { // the ingress rules for each are combined additively. // This field is NOT optional and follows standard label selector semantics. // An empty podSelector matches all pods in this namespace. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // ingress is a list of ingress rules to be applied to the selected pods. // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod @@ -541,6 +541,7 @@ message NetworkPolicySpec { } // ServiceBackendPort is the service port being referenced. +// +structType=atomic message ServiceBackendPort { // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". 
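// ---------------------------------------------------------------------------
// Reviewer note (illustrative sketch, not part of the vendored diff): the
// networking/v1 package (like flowcontrol/v1 earlier in this diff) now opts
// into prerelease-lifecycle-gen via doc.go, so the new
// zz_generated.prerelease-lifecycle.go shown below adds
// APILifecycleIntroduced() methods to the GA types. Callers can read the
// introduction release straight from the vendored types, e.g.:
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
)

func main() {
	var ing networkingv1.Ingress
	var np networkingv1.NetworkPolicy

	// Generated methods; per the new file below, Ingress reports 1.19 and
	// NetworkPolicy reports 1.7.
	major, minor := ing.APILifecycleIntroduced()
	fmt.Printf("networking/v1 Ingress introduced in %d.%d\n", major, minor)

	major, minor = np.APILifecycleIntroduced()
	fmt.Printf("networking/v1 NetworkPolicy introduced in %d.%d\n", major, minor)
}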
diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index 8ee62918b0..d75e27558d 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -24,6 +24,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.7 // NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { @@ -214,6 +215,7 @@ type NetworkPolicyPeer struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // NetworkPolicyList is a list of NetworkPolicy objects. type NetworkPolicyList struct { @@ -230,6 +232,7 @@ type NetworkPolicyList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services @@ -255,6 +258,7 @@ type Ingress struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressList is a collection of Ingress. type IngressList struct { @@ -415,7 +419,7 @@ type IngressRule struct { // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -527,6 +531,7 @@ type IngressServiceBackend struct { } // ServiceBackendPort is the service port being referenced. +// +structType=atomic type ServiceBackendPort struct { // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". @@ -542,6 +547,7 @@ type ServiceBackendPort struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressClass represents the class of the Ingress, referenced by the Ingress // Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be @@ -616,6 +622,7 @@ type IngressClassParametersReference struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressClassList is a collection of IngressClasses. type IngressClassList struct { diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..21e8c671a5 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Ingress) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IngressClass) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IngressClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IngressList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NetworkPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 7 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NetworkPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto index 8914fffcf8..80ec6af735 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -39,7 +39,7 @@ message IPAddress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the IPAddress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -52,7 +52,7 @@ message IPAddressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of IPAddresses. repeated IPAddress items = 2; @@ -91,7 +91,7 @@ message ServiceCIDR { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the ServiceCIDR. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -109,7 +109,7 @@ message ServiceCIDRList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of ServiceCIDRs. repeated ServiceCIDR items = 2; @@ -119,6 +119,9 @@ message ServiceCIDRList { message ServiceCIDRSpec { // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // The network address of each CIDR, the address that identifies the subnet of a host, is reserved + // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be + // allocated. // This field is immutable. // +optional // +listType=atomic @@ -134,6 +137,6 @@ message ServiceCIDRStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; } diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go index bcdc33b459..0e454f0263 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -109,6 +109,9 @@ type ServiceCIDR struct { type ServiceCIDRSpec struct { // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // The network address of each CIDR, the address that identifies the subnet of a host, is reserved + // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be + // allocated. // This field is immutable. // +optional // +listType=atomic diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index 481ec06030..4c8eb57a7a 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -91,7 +91,7 @@ func (ServiceCIDRList) SwaggerDoc() map[string]string { var map_ServiceCIDRSpec = map[string]string{ "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", - "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", + "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. The network address of each CIDR, the address that identifies the subnet of a host, is reserved and will not be allocated. 
The broadcast address for IPv4 CIDRs is also reserved and will not be allocated. This field is immutable.", } func (ServiceCIDRSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go index 13d4f53855..a924725f28 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -27,6 +27,7 @@ import ( proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" @@ -101,10 +102,94 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{2} +} +func (m *IPAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) +} +func (m *IPAddress) XXX_Size() int { + return m.Size() +} +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddress proto.InternalMessageInfo + +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{3} +} +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) +} +func (m *IPAddressList) XXX_Size() int { + return m.Size() +} +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo + +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{4} +} +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) +} +func (m *IPAddressSpec) XXX_Size() int { + return m.Size() +} +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo + func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{2} + return fileDescriptor_9497719c79c89d2d, []int{5} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +217,7 @@ var 
xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{3} + return fileDescriptor_9497719c79c89d2d, []int{6} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -160,7 +245,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressClass) Reset() { *m = IngressClass{} } func (*IngressClass) ProtoMessage() {} func (*IngressClass) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{4} + return fileDescriptor_9497719c79c89d2d, []int{7} } func (m *IngressClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +273,7 @@ var xxx_messageInfo_IngressClass proto.InternalMessageInfo func (m *IngressClassList) Reset() { *m = IngressClassList{} } func (*IngressClassList) ProtoMessage() {} func (*IngressClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{5} + return fileDescriptor_9497719c79c89d2d, []int{8} } func (m *IngressClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +301,7 @@ var xxx_messageInfo_IngressClassList proto.InternalMessageInfo func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} } func (*IngressClassParametersReference) ProtoMessage() {} func (*IngressClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{6} + return fileDescriptor_9497719c79c89d2d, []int{9} } func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +329,7 @@ var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} } func (*IngressClassSpec) ProtoMessage() {} func (*IngressClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{7} + return fileDescriptor_9497719c79c89d2d, []int{10} } func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -272,7 +357,7 @@ var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{8} + return fileDescriptor_9497719c79c89d2d, []int{11} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -300,7 +385,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } func (*IngressLoadBalancerIngress) ProtoMessage() {} func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{9} + return fileDescriptor_9497719c79c89d2d, []int{12} } func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -328,7 +413,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } func (*IngressLoadBalancerStatus) ProtoMessage() {} func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{10} + return fileDescriptor_9497719c79c89d2d, []int{13} } func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -356,7 
+441,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } func (*IngressPortStatus) ProtoMessage() {} func (*IngressPortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{11} + return fileDescriptor_9497719c79c89d2d, []int{14} } func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -384,7 +469,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{12} + return fileDescriptor_9497719c79c89d2d, []int{15} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -412,7 +497,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{13} + return fileDescriptor_9497719c79c89d2d, []int{16} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +525,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{14} + return fileDescriptor_9497719c79c89d2d, []int{17} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -468,7 +553,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{15} + return fileDescriptor_9497719c79c89d2d, []int{18} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -496,7 +581,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{16} + return fileDescriptor_9497719c79c89d2d, []int{19} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -521,9 +606,152 @@ func (m *IngressTLS) XXX_DiscardUnknown() { var xxx_messageInfo_IngressTLS proto.InternalMessageInfo +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{20} +} +func (m *ParentReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) +} +func (m *ParentReference) XXX_Size() int { + return m.Size() +} +func (m *ParentReference) XXX_DiscardUnknown() { + xxx_messageInfo_ParentReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ParentReference proto.InternalMessageInfo + +func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } +func (*ServiceCIDR) ProtoMessage() 
{} +func (*ServiceCIDR) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{21} +} +func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDR.Merge(m, src) +} +func (m *ServiceCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo + +func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } +func (*ServiceCIDRList) ProtoMessage() {} +func (*ServiceCIDRList) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{22} +} +func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRList.Merge(m, src) +} +func (m *ServiceCIDRList) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo + +func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } +func (*ServiceCIDRSpec) ProtoMessage() {} +func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{23} +} +func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRSpec.Merge(m, src) +} +func (m *ServiceCIDRSpec) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo + +func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } +func (*ServiceCIDRStatus) ProtoMessage() {} +func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{24} +} +func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRStatus.Merge(m, src) +} +func (m *ServiceCIDRStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo + func init() { proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*IPAddress)(nil), 
"k8s.io.api.networking.v1beta1.IPAddress") + proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1beta1.IPAddressList") + proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1beta1.IPAddressSpec") proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend") proto.RegisterType((*IngressClass)(nil), "k8s.io.api.networking.v1beta1.IngressClass") @@ -539,6 +767,11 @@ func init() { proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec") proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") + proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1beta1.ParentReference") + proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDR") + proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRList") + proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRSpec") + proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRStatus") } func init() { @@ -546,85 +779,99 @@ func init() { } var fileDescriptor_9497719c79c89d2d = []byte{ - // 1234 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xcf, 0xda, 0x71, 0xe3, 0x8c, 0xd3, 0x34, 0xff, 0xf9, 0xe7, 0x60, 0x82, 0x6a, 0x47, 0x7b, - 0x40, 0x81, 0x36, 0xbb, 0x4d, 0x5a, 0x50, 0xb9, 0x20, 0xd8, 0x08, 0x91, 0x28, 0x21, 0x31, 0x63, - 0xf3, 0x22, 0xc4, 0x81, 0xc9, 0x7a, 0x6a, 0x2f, 0x5e, 0xef, 0xae, 0x66, 0x66, 0x83, 0x7a, 0x03, - 0x71, 0xe2, 0x04, 0xdf, 0x01, 0x89, 0x8f, 0x80, 0xb8, 0x20, 0x21, 0xb8, 0xe4, 0xd8, 0x63, 0x2f, - 0x44, 0xc4, 0x7c, 0x8b, 0x9e, 0xd0, 0x33, 0x3b, 0xfb, 0xe2, 0x97, 0xb4, 0x0e, 0x87, 0x9e, 0xe2, - 0x7d, 0x5e, 0x7e, 0xcf, 0xfb, 0x33, 0x4f, 0xd0, 0xf6, 0xe0, 0xa1, 0xb0, 0xbc, 0xd0, 0xa6, 0x91, - 0x67, 0x07, 0x4c, 0x7e, 0x1d, 0xf2, 0x81, 0x17, 0xf4, 0xec, 0xb3, 0x9d, 0x53, 0x26, 0xe9, 0x8e, - 0xdd, 0x63, 0x01, 0xe3, 0x54, 0xb2, 0xae, 0x15, 0xf1, 0x50, 0x86, 0xf8, 0x76, 0x22, 0x6e, 0xd1, - 0xc8, 0xb3, 0x72, 0x71, 0x4b, 0x8b, 0x6f, 0x6c, 0xf7, 0x3c, 0xd9, 0x8f, 0x4f, 0x2d, 0x37, 0x1c, - 0xda, 0xbd, 0xb0, 0x17, 0xda, 0x4a, 0xeb, 0x34, 0x7e, 0xa4, 0xbe, 0xd4, 0x87, 0xfa, 0x95, 0xa0, - 0x6d, 0x98, 0x05, 0xe3, 0x6e, 0xc8, 0x99, 0x7d, 0x36, 0x65, 0x71, 0xe3, 0x41, 0x2e, 0x33, 0xa4, - 0x6e, 0xdf, 0x0b, 0x18, 0x7f, 0x6c, 0x47, 0x83, 0x1e, 0x10, 0x84, 0x3d, 0x64, 0x92, 0xce, 0xd2, - 0xb2, 0xaf, 0xd2, 0xe2, 0x71, 0x20, 0xbd, 0x21, 0x9b, 0x52, 0x78, 0xeb, 0x45, 0x0a, 0xc2, 0xed, - 0xb3, 0x21, 0x9d, 0xd2, 0xbb, 0x7f, 0x95, 0x5e, 0x2c, 0x3d, 0xdf, 0xf6, 0x02, 0x29, 0x24, 0x9f, - 0x54, 0x32, 0xff, 0x34, 0xd0, 0xad, 0xfd, 0x4e, 0xa7, 0x75, 0x10, 0xf4, 0x38, 0x13, 0xa2, 0x45, - 0x65, 0x1f, 0x6f, 0xa2, 0xc5, 0x88, 0xca, 0x7e, 0xdd, 0xd8, 0x34, 0xb6, 0x96, 0x9d, 0x95, 0xf3, - 0x8b, 0xe6, 0xc2, 0xe8, 0xa2, 0xb9, 0x08, 0x3c, 0xa2, 0x38, 0xf8, 0x01, 0xaa, 0xc2, 0xdf, 0xce, - 0xe3, 0x88, 0xd5, 0xcb, 0x4a, 0xaa, 0x3e, 0xba, 0x68, 0x56, 0x5b, 0x9a, 0xf6, 0xac, 0xf0, 0x9b, - 0x64, 0x92, 0xf8, 0x33, 0xb4, 0x74, 0x4a, 0xdd, 0x01, 0x0b, 0xba, 0xf5, 0xd2, 0xa6, 0xb1, 0x55, - 0xdb, 0xdd, 0xb6, 0x9e, 0x5b, 0x43, 0x4b, 0x3b, 0xe5, 0x24, 0x4a, 0xce, 0x2d, 0xed, 0xc9, 0x92, - 0x26, 0x90, 0x14, 0xce, 0x1c, 0xa0, 0xf5, 0x42, 0x10, 0x24, 0xf6, 0xd9, 0x27, 0xd4, 0x8f, 0x19, - 
0x6e, 0xa3, 0x0a, 0x58, 0x17, 0x75, 0x63, 0xb3, 0xbc, 0x55, 0xdb, 0xb5, 0x5e, 0x60, 0x6f, 0x22, - 0x11, 0xce, 0x4d, 0x6d, 0xb0, 0x02, 0x5f, 0x82, 0x24, 0x58, 0xe6, 0x0f, 0x25, 0xb4, 0xa4, 0xa5, - 0xf0, 0x97, 0xa8, 0x0a, 0x75, 0xef, 0x52, 0x49, 0x55, 0xba, 0x6a, 0xbb, 0xf7, 0x0a, 0x36, 0xb2, - 0x32, 0x58, 0xd1, 0xa0, 0x07, 0x04, 0x61, 0x81, 0xb4, 0x75, 0xb6, 0x63, 0x9d, 0x9c, 0x7e, 0xc5, - 0x5c, 0xf9, 0x21, 0x93, 0xd4, 0xc1, 0xda, 0x0a, 0xca, 0x69, 0x24, 0x43, 0xc5, 0x47, 0x68, 0x51, - 0x44, 0xcc, 0xd5, 0x19, 0x7b, 0x63, 0xbe, 0x8c, 0xb5, 0x23, 0xe6, 0xe6, 0x85, 0x83, 0x2f, 0xa2, - 0x50, 0x70, 0x07, 0xdd, 0x10, 0x92, 0xca, 0x58, 0xa8, 0xb2, 0xd5, 0x76, 0xef, 0xce, 0x89, 0xa7, - 0x74, 0x9c, 0x55, 0x8d, 0x78, 0x23, 0xf9, 0x26, 0x1a, 0xcb, 0xfc, 0xbe, 0x84, 0x56, 0xc7, 0x6b, - 0x85, 0xdf, 0x44, 0x35, 0xc1, 0xf8, 0x99, 0xe7, 0xb2, 0x63, 0x3a, 0x64, 0xba, 0x95, 0xfe, 0xaf, - 0xf5, 0x6b, 0xed, 0x9c, 0x45, 0x8a, 0x72, 0xb8, 0x97, 0xa9, 0xb5, 0x42, 0x2e, 0x75, 0xd0, 0x57, - 0xa7, 0x14, 0x3a, 0xdb, 0x4a, 0x3a, 0xdb, 0x3a, 0x08, 0xe4, 0x09, 0x6f, 0x4b, 0xee, 0x05, 0xbd, - 0x29, 0x43, 0x00, 0x46, 0x8a, 0xc8, 0xf8, 0x53, 0x54, 0xe5, 0x4c, 0x84, 0x31, 0x77, 0x99, 0x4e, - 0xc5, 0x58, 0x33, 0xc2, 0x0a, 0x80, 0x32, 0x41, 0xdf, 0x76, 0x8f, 0x42, 0x97, 0xfa, 0x49, 0x71, - 0x08, 0x7b, 0xc4, 0x38, 0x0b, 0x5c, 0xe6, 0xac, 0x40, 0xc3, 0x13, 0x0d, 0x41, 0x32, 0x30, 0x18, - 0xa8, 0x15, 0x9d, 0x8b, 0x3d, 0x9f, 0xbe, 0x94, 0x16, 0xf9, 0x68, 0xac, 0x45, 0xec, 0xf9, 0x4a, - 0xaa, 0x9c, 0xbb, 0xaa, 0x4f, 0xcc, 0x3f, 0x0c, 0xb4, 0x56, 0x14, 0x3c, 0xf2, 0x84, 0xc4, 0x5f, - 0x4c, 0x45, 0x62, 0xcd, 0x17, 0x09, 0x68, 0xab, 0x38, 0xd6, 0xb4, 0xa9, 0x6a, 0x4a, 0x29, 0x44, - 0xd1, 0x42, 0x15, 0x4f, 0xb2, 0xa1, 0xa8, 0x97, 0xd4, 0xac, 0xde, 0xb9, 0x46, 0x18, 0xf9, 0xa0, - 0x1e, 0x00, 0x02, 0x49, 0x80, 0xcc, 0xbf, 0x0c, 0xd4, 0x2c, 0x8a, 0xb5, 0x28, 0xa7, 0x43, 0x26, - 0x19, 0x17, 0x59, 0x19, 0xf1, 0x16, 0xaa, 0xd2, 0xd6, 0xc1, 0x07, 0x3c, 0x8c, 0xa3, 0x74, 0xdf, - 0x81, 0x7f, 0xef, 0x69, 0x1a, 0xc9, 0xb8, 0xb0, 0x15, 0x07, 0x9e, 0x5e, 0x5d, 0x85, 0xad, 0x78, - 0xe8, 0x05, 0x5d, 0xa2, 0x38, 0x20, 0x11, 0x40, 0xb3, 0x97, 0xc7, 0x25, 0x54, 0x97, 0x2b, 0x0e, - 0x6e, 0xa2, 0x8a, 0x70, 0xc3, 0x88, 0xd5, 0x17, 0x95, 0xc8, 0x32, 0xb8, 0xdc, 0x06, 0x02, 0x49, - 0xe8, 0xf8, 0x0e, 0x5a, 0x06, 0x41, 0x11, 0x51, 0x97, 0xd5, 0x2b, 0x4a, 0xe8, 0xe6, 0xe8, 0xa2, - 0xb9, 0x7c, 0x9c, 0x12, 0x49, 0xce, 0x37, 0x7f, 0x99, 0x28, 0x12, 0xd4, 0x0f, 0xef, 0x22, 0xe4, - 0x86, 0x81, 0xe4, 0xa1, 0xef, 0x33, 0xae, 0x43, 0xca, 0xda, 0x67, 0x2f, 0xe3, 0x90, 0x82, 0x14, - 0x0e, 0x10, 0x8a, 0xb2, 0xdc, 0xe8, 0x36, 0x7a, 0xe7, 0x1a, 0xf9, 0x9f, 0x91, 0x58, 0x67, 0x15, - 0xec, 0x15, 0x18, 0x05, 0x0b, 0xe6, 0xaf, 0x06, 0xaa, 0x69, 0xfd, 0x97, 0xd0, 0x58, 0x87, 0xe3, - 0x8d, 0xf5, 0xda, 0x9c, 0x8f, 0xce, 0xec, 0x9e, 0xfa, 0xcd, 0x40, 0x1b, 0xa9, 0xeb, 0x21, 0xed, - 0x3a, 0xd4, 0xa7, 0x81, 0xcb, 0x78, 0xfa, 0x1e, 0x6c, 0xa0, 0x92, 0x97, 0x36, 0x12, 0xd2, 0x00, - 0xa5, 0x83, 0x16, 0x29, 0x79, 0x11, 0xbe, 0x8b, 0xaa, 0xfd, 0x50, 0x48, 0xd5, 0x22, 0x49, 0x13, - 0x65, 0x5e, 0xef, 0x6b, 0x3a, 0xc9, 0x24, 0xf0, 0xc7, 0xa8, 0x12, 0x85, 0x5c, 0x8a, 0xfa, 0xa2, - 0xf2, 0xfa, 0xde, 0x7c, 0x5e, 0xc3, 0x6e, 0xd3, 0xcb, 0x3a, 0x7f, 0xbc, 0x00, 0x86, 0x24, 0x68, - 0xe6, 0xb7, 0x06, 0x7a, 0x65, 0x86, 0xff, 0x89, 0x0e, 0xee, 0xa2, 0x25, 0x2f, 0x61, 0xea, 0x17, - 0xf3, 0xed, 0xf9, 0xcc, 0xce, 0x48, 0x45, 0xfe, 0x5a, 0xa7, 0xaf, 0x72, 0x0a, 0x6d, 0xfe, 0x64, - 0xa0, 0xff, 0x4d, 0xf9, 0xab, 0xae, 0x0e, 0xd8, 0xf9, 0x90, 0xbc, 0x4a, 0xe1, 0xea, 0x80, 0xd5, - 0xad, 0x38, 0xf8, 0x10, 
0x55, 0xd5, 0xd1, 0xe2, 0x86, 0xbe, 0x4e, 0xa0, 0x9d, 0x26, 0xb0, 0xa5, - 0xe9, 0xcf, 0x2e, 0x9a, 0xaf, 0x4e, 0x5f, 0x72, 0x56, 0xca, 0x26, 0x19, 0x00, 0x8c, 0x22, 0xe3, - 0x3c, 0xe4, 0x7a, 0x5a, 0xd5, 0x28, 0xbe, 0x0f, 0x04, 0x92, 0xd0, 0xcd, 0x9f, 0xf3, 0x26, 0x85, - 0x83, 0x02, 0xfc, 0x83, 0xe2, 0x4c, 0x5e, 0x45, 0x50, 0x3a, 0xa2, 0x38, 0x38, 0x46, 0x6b, 0xde, - 0xc4, 0x05, 0x72, 0xbd, 0x9d, 0x9c, 0xa9, 0x39, 0x75, 0x0d, 0xbf, 0x36, 0xc9, 0x21, 0x53, 0x26, - 0x4c, 0x86, 0xa6, 0xa4, 0xe0, 0x49, 0xe8, 0x4b, 0x19, 0xe9, 0x69, 0xba, 0x3f, 0xff, 0xdd, 0x93, - 0xbb, 0x50, 0x55, 0xd1, 0x75, 0x3a, 0x2d, 0xa2, 0xa0, 0xcc, 0xdf, 0x4b, 0x59, 0x3e, 0xd4, 0xa2, - 0x79, 0x37, 0x8b, 0x56, 0xed, 0x00, 0xf5, 0xcc, 0x27, 0x6b, 0x6d, 0xbd, 0xe0, 0x78, 0xc6, 0x23, - 0x53, 0xd2, 0xb8, 0x93, 0xdf, 0x83, 0xc6, 0x7f, 0xb9, 0x07, 0x6b, 0xb3, 0x6e, 0x41, 0xbc, 0x8f, - 0xca, 0xd2, 0x4f, 0x87, 0xfd, 0xf5, 0xf9, 0x10, 0x3b, 0x47, 0x6d, 0xa7, 0xa6, 0x53, 0x5e, 0xee, - 0x1c, 0xb5, 0x09, 0x40, 0xe0, 0x13, 0x54, 0xe1, 0xb1, 0xcf, 0xe0, 0x56, 0x2a, 0xcf, 0x7f, 0x7b, - 0x41, 0x06, 0xf3, 0xe1, 0x83, 0x2f, 0x41, 0x12, 0x1c, 0xf3, 0x3b, 0x03, 0xdd, 0x1c, 0xbb, 0xa8, - 0x30, 0x47, 0x2b, 0x7e, 0x61, 0x76, 0x74, 0x1e, 0x1e, 0x5e, 0x7f, 0xea, 0xf4, 0xd0, 0xaf, 0x6b, - 0xbb, 0x2b, 0x45, 0x1e, 0x19, 0xb3, 0x61, 0x52, 0x84, 0xf2, 0xb0, 0x61, 0x0e, 0xa0, 0x79, 0x93, - 0x81, 0xd7, 0x73, 0x00, 0x3d, 0x2d, 0x48, 0x42, 0x87, 0x07, 0x45, 0x30, 0x97, 0x33, 0x79, 0x9c, - 0x2f, 0xae, 0xec, 0x41, 0x69, 0x67, 0x1c, 0x52, 0x90, 0x72, 0xf6, 0xce, 0x2f, 0x1b, 0x0b, 0x4f, - 0x2e, 0x1b, 0x0b, 0x4f, 0x2f, 0x1b, 0x0b, 0xdf, 0x8c, 0x1a, 0xc6, 0xf9, 0xa8, 0x61, 0x3c, 0x19, - 0x35, 0x8c, 0xa7, 0xa3, 0x86, 0xf1, 0xf7, 0xa8, 0x61, 0xfc, 0xf8, 0x4f, 0x63, 0xe1, 0xf3, 0xdb, - 0xcf, 0xfd, 0x87, 0xef, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xea, 0xf6, 0xe9, 0x27, 0x10, 0x0e, - 0x00, 0x00, + // 1457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0xc5, + 0x1f, 0xcf, 0x3a, 0x71, 0xe3, 0x8c, 0xd3, 0x26, 0x9d, 0x5f, 0x0f, 0xfe, 0x05, 0xd5, 0x8e, 0x16, + 0x09, 0x85, 0x3e, 0x76, 0xdb, 0xb4, 0xa0, 0x72, 0x41, 0xd4, 0x01, 0x51, 0xab, 0x69, 0xb2, 0x8c, + 0x0d, 0x54, 0xc0, 0x81, 0xc9, 0x7a, 0x6a, 0x2f, 0x5e, 0xef, 0xae, 0x66, 0xc7, 0x81, 0xde, 0x40, + 0x9c, 0x38, 0xc1, 0x9d, 0x23, 0x12, 0x7f, 0x02, 0x70, 0xa0, 0x52, 0x05, 0x97, 0x1e, 0x7b, 0xec, + 0x85, 0x88, 0x9a, 0xff, 0xa2, 0x27, 0xf4, 0x9d, 0x9d, 0x7d, 0xf9, 0xd1, 0x6c, 0x38, 0xe4, 0x54, + 0xef, 0xf7, 0x3d, 0xdf, 0xe7, 0xa7, 0x41, 0x57, 0x07, 0xb7, 0x42, 0xc3, 0xf1, 0x4d, 0x1a, 0x38, + 0xa6, 0xc7, 0xc4, 0x97, 0x3e, 0x1f, 0x38, 0x5e, 0xcf, 0x3c, 0xbc, 0x7e, 0xc0, 0x04, 0xbd, 0x6e, + 0xf6, 0x98, 0xc7, 0x38, 0x15, 0xac, 0x6b, 0x04, 0xdc, 0x17, 0x3e, 0xbe, 0x18, 0x89, 0x1b, 0x34, + 0x70, 0x8c, 0x54, 0xdc, 0x50, 0xe2, 0x1b, 0x57, 0x7b, 0x8e, 0xe8, 0x8f, 0x0e, 0x0c, 0xdb, 0x1f, + 0x9a, 0x3d, 0xbf, 0xe7, 0x9b, 0x52, 0xeb, 0x60, 0xf4, 0x40, 0x7e, 0xc9, 0x0f, 0xf9, 0x2b, 0xb2, + 0xb6, 0xa1, 0x67, 0x9c, 0xdb, 0x3e, 0x67, 0xe6, 0xe1, 0x94, 0xc7, 0x8d, 0x9b, 0xa9, 0xcc, 0x90, + 0xda, 0x7d, 0xc7, 0x63, 0xfc, 0xa1, 0x19, 0x0c, 0x7a, 0x40, 0x08, 0xcd, 0x21, 0x13, 0x74, 0x96, + 0x96, 0x39, 0x4f, 0x8b, 0x8f, 0x3c, 0xe1, 0x0c, 0xd9, 0x94, 0xc2, 0x9b, 0xc7, 0x29, 0x84, 0x76, + 0x9f, 0x0d, 0xe9, 0x94, 0xde, 0x8d, 0x79, 0x7a, 0x23, 0xe1, 0xb8, 0xa6, 0xe3, 0x89, 0x50, 0xf0, + 0x49, 0x25, 0xfd, 0x4f, 0x0d, 0xad, 0xdd, 0xe9, 0x74, 0xac, 0x96, 0xd7, 0xe3, 0x2c, 0x0c, 0x2d, + 0x2a, 0xfa, 0x78, 0x13, 0x2d, 0x05, 0x54, 0xf4, 0x6b, 0xda, 0xa6, 0xb6, 0xb5, 0xd2, 
0x5c, 0x7d, + 0x72, 0xd4, 0x58, 0x18, 0x1f, 0x35, 0x96, 0x80, 0x47, 0x24, 0x07, 0xdf, 0x44, 0x15, 0xf8, 0xb7, + 0xf3, 0x30, 0x60, 0xb5, 0x45, 0x29, 0x55, 0x1b, 0x1f, 0x35, 0x2a, 0x96, 0xa2, 0xbd, 0xc8, 0xfc, + 0x26, 0x89, 0x24, 0xbe, 0x8f, 0x96, 0x0f, 0xa8, 0x3d, 0x60, 0x5e, 0xb7, 0x56, 0xda, 0xd4, 0xb6, + 0xaa, 0xdb, 0x57, 0x8d, 0x97, 0xd6, 0xd0, 0x50, 0x41, 0x35, 0x23, 0xa5, 0xe6, 0x9a, 0x8a, 0x64, + 0x59, 0x11, 0x48, 0x6c, 0x4e, 0x1f, 0xa0, 0x0b, 0x99, 0x47, 0x90, 0x91, 0xcb, 0x3e, 0xa2, 0xee, + 0x88, 0xe1, 0x36, 0x2a, 0x83, 0xf7, 0xb0, 0xa6, 0x6d, 0x2e, 0x6e, 0x55, 0xb7, 0x8d, 0x63, 0xfc, + 0x4d, 0x24, 0xa2, 0x79, 0x56, 0x39, 0x2c, 0xc3, 0x57, 0x48, 0x22, 0x5b, 0xfa, 0x23, 0x0d, 0xad, + 0xb4, 0xac, 0xdb, 0xdd, 0x2e, 0xc8, 0xe1, 0xcf, 0x51, 0x05, 0x2a, 0xdf, 0xa5, 0x82, 0xca, 0x84, + 0x55, 0xb7, 0xaf, 0x65, 0xbc, 0x24, 0x85, 0x30, 0x82, 0x41, 0x0f, 0x08, 0xa1, 0x01, 0xd2, 0xc6, + 0xe1, 0x75, 0x63, 0xff, 0xe0, 0x0b, 0x66, 0x8b, 0x7b, 0x4c, 0xd0, 0x26, 0x56, 0x7e, 0x50, 0x4a, + 0x23, 0x89, 0x55, 0xbc, 0x87, 0x96, 0xc2, 0x80, 0xd9, 0x2a, 0x67, 0x57, 0x8e, 0xcb, 0x59, 0x1c, + 0x59, 0x3b, 0x60, 0x76, 0x5a, 0x3c, 0xf8, 0x22, 0xd2, 0x8e, 0xfe, 0xbb, 0x86, 0xce, 0x26, 0x52, + 0xbb, 0x4e, 0x28, 0xf0, 0x67, 0x53, 0x6f, 0x30, 0x8a, 0xbd, 0x01, 0xb4, 0xe5, 0x0b, 0xd6, 0x95, + 0x9f, 0x4a, 0x4c, 0xc9, 0xc4, 0x7f, 0x0f, 0x95, 0x1d, 0xc1, 0x86, 0x61, 0xad, 0x24, 0x8b, 0xb0, + 0x55, 0xf4, 0x01, 0x69, 0xfa, 0x5b, 0xa0, 0x4e, 0x22, 0x2b, 0xba, 0x9b, 0x89, 0x1e, 0x5e, 0x85, + 0x3f, 0x45, 0x2b, 0x01, 0xe5, 0xcc, 0x13, 0x84, 0x3d, 0x98, 0x11, 0xfe, 0x2c, 0x1f, 0x56, 0x2c, + 0xcf, 0x38, 0xf3, 0x6c, 0xd6, 0x3c, 0x3b, 0x3e, 0x6a, 0xac, 0x24, 0x44, 0x92, 0xda, 0xd3, 0xbf, + 0x2f, 0xa1, 0x65, 0xd5, 0x12, 0xa7, 0x50, 0xea, 0xdd, 0x5c, 0xa9, 0x2f, 0x15, 0x1b, 0x8f, 0x79, + 0x85, 0xc6, 0x1d, 0x74, 0x26, 0x14, 0x54, 0x8c, 0x42, 0x39, 0xa3, 0x05, 0x5a, 0x47, 0xd9, 0x93, + 0x3a, 0xcd, 0x73, 0xca, 0xe2, 0x99, 0xe8, 0x9b, 0x28, 0x5b, 0xfa, 0x77, 0x25, 0x74, 0x2e, 0x3f, + 0x98, 0xf8, 0x0d, 0x54, 0x0d, 0x19, 0x3f, 0x74, 0x6c, 0xb6, 0x47, 0x87, 0x4c, 0xed, 0x8d, 0xff, + 0x29, 0xfd, 0x6a, 0x3b, 0x65, 0x91, 0xac, 0x1c, 0xee, 0x25, 0x6a, 0x96, 0xcf, 0x85, 0x7a, 0xf4, + 0xfc, 0x94, 0xc2, 0x1a, 0x33, 0xa2, 0x35, 0x66, 0xb4, 0x3c, 0xb1, 0xcf, 0xdb, 0x82, 0x3b, 0x5e, + 0x6f, 0xca, 0x11, 0x18, 0x23, 0x59, 0xcb, 0xf8, 0x63, 0x54, 0xe1, 0x2c, 0xf4, 0x47, 0xdc, 0x66, + 0x2a, 0x15, 0xb9, 0xcd, 0x03, 0xfb, 0x1e, 0xca, 0x04, 0x4b, 0xaa, 0xbb, 0xeb, 0xdb, 0xd4, 0x8d, + 0x8a, 0x93, 0xf6, 0xc7, 0x2a, 0xb4, 0x36, 0x51, 0x26, 0x48, 0x62, 0x0c, 0xb6, 0xe7, 0xaa, 0xca, + 0xc5, 0x8e, 0x4b, 0x4f, 0xa5, 0x45, 0x3e, 0xc8, 0xb5, 0x88, 0x59, 0xac, 0xa4, 0x32, 0xb8, 0xb9, + 0x0b, 0xe1, 0x0f, 0x0d, 0xad, 0x67, 0x05, 0x4f, 0x61, 0x27, 0x58, 0xf9, 0x9d, 0x70, 0xf9, 0x04, + 0xcf, 0x98, 0xb3, 0x16, 0xfe, 0xd2, 0x50, 0x23, 0x2b, 0x66, 0x51, 0x4e, 0x87, 0x4c, 0x30, 0x1e, + 0x26, 0x65, 0xc4, 0x5b, 0xa8, 0x42, 0xad, 0xd6, 0xfb, 0xdc, 0x1f, 0x05, 0xf1, 0x71, 0x83, 0xf8, + 0x6e, 0x2b, 0x1a, 0x49, 0xb8, 0x70, 0x02, 0x07, 0x8e, 0xba, 0x53, 0x99, 0x13, 0x78, 0xd7, 0xf1, + 0xba, 0x44, 0x72, 0x40, 0xc2, 0x83, 0x66, 0x5f, 0xcc, 0x4b, 0xc8, 0x2e, 0x97, 0x1c, 0xdc, 0x40, + 0xe5, 0xd0, 0xf6, 0x03, 0x56, 0x5b, 0x92, 0x22, 0x2b, 0x10, 0x72, 0x1b, 0x08, 0x24, 0xa2, 0xe3, + 0xcb, 0x68, 0x05, 0x04, 0xc3, 0x80, 0xda, 0xac, 0x56, 0x96, 0x42, 0x72, 0x11, 0xed, 0xc5, 0x44, + 0x92, 0xf2, 0xf5, 0x5f, 0x26, 0x8a, 0x24, 0x57, 0xdf, 0x36, 0x42, 0xb6, 0xef, 0x09, 0xee, 0xbb, + 0x2e, 0xe3, 0xea, 0x49, 0x49, 0xfb, 0xec, 0x24, 0x1c, 0x92, 0x91, 0xc2, 0x1e, 0x42, 0x41, 0x92, + 0x1b, 0xd5, 
0x46, 0x6f, 0x9f, 0x20, 0xff, 0x33, 0x12, 0xdb, 0x3c, 0x07, 0xfe, 0x32, 0x8c, 0x8c, + 0x07, 0xfd, 0x37, 0x0d, 0x55, 0x95, 0xfe, 0x29, 0x34, 0xd6, 0xdd, 0x7c, 0x63, 0xbd, 0x56, 0x10, + 0x61, 0xcc, 0xee, 0xa9, 0x47, 0x1a, 0xda, 0x88, 0x43, 0xf7, 0x69, 0xb7, 0x49, 0x5d, 0xea, 0xd9, + 0x8c, 0xc7, 0xf7, 0x60, 0x03, 0x95, 0x9c, 0xb8, 0x91, 0x90, 0x32, 0x50, 0x6a, 0x59, 0xa4, 0xe4, + 0x04, 0xf8, 0x0a, 0xaa, 0xf4, 0xfd, 0x50, 0xc8, 0x16, 0x89, 0x9a, 0x28, 0x89, 0xfa, 0x8e, 0xa2, + 0x93, 0x44, 0x02, 0x7f, 0x88, 0xca, 0x81, 0xcf, 0x45, 0x58, 0x5b, 0x92, 0x51, 0x5f, 0x2b, 0x16, + 0x35, 0xec, 0x36, 0xb5, 0xac, 0x53, 0xa4, 0x02, 0x66, 0x48, 0x64, 0x4d, 0xff, 0x46, 0x43, 0xff, + 0x9f, 0x11, 0x7f, 0xa4, 0x83, 0xbb, 0x68, 0xd9, 0x89, 0x98, 0x0a, 0x1e, 0xbd, 0x55, 0xcc, 0xed, + 0x8c, 0x54, 0xa4, 0xd0, 0x2c, 0x86, 0x60, 0xb1, 0x69, 0xfd, 0x27, 0x0d, 0x9d, 0x9f, 0x8a, 0x57, + 0x42, 0x4c, 0xd8, 0xf9, 0x90, 0xbc, 0x72, 0x06, 0x62, 0xc2, 0xea, 0x96, 0x1c, 0x7c, 0x17, 0x55, + 0x24, 0x42, 0xb5, 0x7d, 0x57, 0x25, 0xd0, 0x8c, 0x13, 0x68, 0x29, 0xfa, 0x8b, 0xa3, 0xc6, 0x2b, + 0xd3, 0xb0, 0xdd, 0x88, 0xd9, 0x24, 0x31, 0x00, 0xa3, 0xc8, 0x38, 0xf7, 0xb9, 0x9a, 0x56, 0x39, + 0x8a, 0xef, 0x01, 0x81, 0x44, 0x74, 0xfd, 0xe7, 0xb4, 0x49, 0x01, 0x3d, 0x42, 0x7c, 0x50, 0x9c, + 0x49, 0x08, 0x0c, 0xa5, 0x23, 0x92, 0x83, 0x47, 0x68, 0xdd, 0x99, 0x80, 0x9b, 0x27, 0xdb, 0xc9, + 0x89, 0x5a, 0xb3, 0xa6, 0xcc, 0xaf, 0x4f, 0x72, 0xc8, 0x94, 0x0b, 0x9d, 0xa1, 0x29, 0x29, 0x38, + 0x09, 0x7d, 0x21, 0x02, 0x35, 0x4d, 0x37, 0x8a, 0x83, 0xdc, 0x34, 0x84, 0x8a, 0x7c, 0x5d, 0xa7, + 0x63, 0x11, 0x69, 0x4a, 0x7f, 0x5c, 0x4a, 0xf2, 0x21, 0x17, 0xcd, 0x3b, 0xc9, 0x6b, 0xe5, 0x0e, + 0x90, 0x67, 0x3e, 0x5a, 0x6b, 0x17, 0x32, 0x81, 0x27, 0x3c, 0x32, 0x25, 0x8d, 0x3b, 0x29, 0xf8, + 0xd7, 0xfe, 0x0b, 0xf8, 0xaf, 0xce, 0x02, 0xfe, 0xf8, 0x0e, 0x5a, 0x14, 0x6e, 0x3c, 0xec, 0xaf, + 0x17, 0xb3, 0xd8, 0xd9, 0x6d, 0x37, 0xab, 0x2a, 0xe5, 0x8b, 0x9d, 0xdd, 0x36, 0x01, 0x13, 0x78, + 0x1f, 0x95, 0xf9, 0xc8, 0x65, 0x80, 0x95, 0x16, 0x8b, 0x63, 0x2f, 0xc8, 0x60, 0x3a, 0x7c, 0xf0, + 0x15, 0x92, 0xc8, 0x8e, 0xfe, 0x2d, 0xc0, 0xec, 0x2c, 0xa2, 0xc2, 0x1c, 0xad, 0xba, 0x99, 0xd9, + 0x51, 0x79, 0xb8, 0x75, 0xf2, 0xa9, 0x53, 0x43, 0x7f, 0x41, 0xf9, 0x5d, 0xcd, 0xf2, 0x48, 0xce, + 0x87, 0x4e, 0x11, 0x4a, 0x9f, 0x0d, 0x73, 0x00, 0xcd, 0x1b, 0x0d, 0xbc, 0x9a, 0x03, 0xe8, 0xe9, + 0x90, 0x44, 0x74, 0x38, 0x28, 0x21, 0xb3, 0x39, 0x13, 0x7b, 0xe9, 0xe2, 0x4a, 0x0e, 0x4a, 0x3b, + 0xe1, 0x90, 0x8c, 0x94, 0xfe, 0xab, 0x86, 0xd6, 0x26, 0x00, 0x35, 0x7e, 0x15, 0x95, 0x7b, 0x99, + 0x33, 0x9b, 0x64, 0x28, 0xba, 0xb3, 0x11, 0x0f, 0x76, 0x64, 0x02, 0xcb, 0x26, 0x76, 0xe4, 0x34, + 0xd6, 0xc2, 0x66, 0xf6, 0x5a, 0x46, 0x73, 0x7c, 0x5e, 0x89, 0xcf, 0xbc, 0x98, 0xc9, 0x85, 0x5e, + 0x9a, 0x77, 0xa1, 0xf5, 0x1f, 0x4b, 0x28, 0x06, 0x8d, 0x3b, 0xad, 0x77, 0xc9, 0x29, 0xa0, 0x37, + 0x2b, 0x87, 0xde, 0x8e, 0xfb, 0x6f, 0x4a, 0x26, 0xb6, 0xb9, 0x20, 0xff, 0xfe, 0x04, 0xc8, 0xbf, + 0x76, 0x02, 0x9b, 0x2f, 0x07, 0xfa, 0x8f, 0x35, 0xb4, 0x96, 0x91, 0x3e, 0x85, 0xe3, 0xbd, 0x9f, + 0x3f, 0xde, 0x97, 0x8a, 0x3f, 0x65, 0xce, 0x01, 0xdf, 0xce, 0xbd, 0x40, 0x6e, 0xb2, 0x06, 0x2a, + 0xdb, 0x4e, 0x97, 0xe7, 0x46, 0x00, 0x98, 0x21, 0x89, 0xe8, 0xfa, 0x57, 0xe8, 0xfc, 0x54, 0x8e, + 0xb0, 0x2d, 0x81, 0x56, 0xd7, 0x11, 0x8e, 0xef, 0xc5, 0xe7, 0xd2, 0x2c, 0xf6, 0xf2, 0x9d, 0x58, + 0x2f, 0x87, 0xcc, 0x94, 0x29, 0x92, 0x31, 0xdb, 0xdc, 0x79, 0xf2, 0xbc, 0xbe, 0xf0, 0xf4, 0x79, + 0x7d, 0xe1, 0xd9, 0xf3, 0xfa, 0xc2, 0xd7, 0xe3, 0xba, 0xf6, 0x64, 0x5c, 0xd7, 0x9e, 0x8e, 0xeb, + 0xda, 0xb3, 0x71, 0x5d, 0xfb, 0x7b, 
0x5c, 0xd7, 0x7e, 0xf8, 0xa7, 0xbe, 0xf0, 0xc9, 0xc5, 0x97, + 0xfe, 0x99, 0xec, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0xc2, 0xa4, 0xff, 0x46, 0x13, 0x00, + 0x00, } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { @@ -709,7 +956,7 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Ingress) Marshal() (dAtA []byte, err error) { +func (m *IPAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -719,18 +966,18 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -738,9 +985,9 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -748,9 +995,46 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -762,7 +1046,7 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IngressBackend) Marshal() (dAtA []byte, err error) { +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -772,19 +1056,19 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Resource != nil { + if m.ParentRef != nil { { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + size, err := 
m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -792,27 +1076,12 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - } - { - size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x12 - i -= len(m.ServiceName) - copy(dAtA[i:], m.ServiceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *IngressClass) Marshal() (dAtA []byte, err error) { +func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -822,16 +1091,26 @@ func (m *IngressClass) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) { +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -855,7 +1134,7 @@ func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IngressClassList) Marshal() (dAtA []byte, err error) { +func (m *IngressBackend) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -865,23 +1144,116 @@ func (m *IngressClassList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) { +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressClass) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } @@ -1378,108 +1750,363 @@ func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ParentReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *HTTPIngressPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) - n += 1 + l + sovGenerated(uint64(l)) - } - return n + +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *HTTPIngressRuleValue) Size() (n int) { - if m == nil { - return 0 - } +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Ingress) Size() (n int) { - if m == nil { - return 0 +func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = 
m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *IngressBackend) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *IngressClass) Size() (n int) { - if m == nil { - return 0 +func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *IngressClassList) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CIDRs) > 0 { + for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CIDRs[iNdEx]) + copy(dAtA[i:], m.CIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PathType != nil { + l = len(*m.PathType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParentRef != nil { + l = m.ParentRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1673,31 +2300,110 @@ func (m *IngressTLS) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return 
sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" + +func (m *ServiceCIDR) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForPaths := "[]HTTPIngressPath{" - for _, f := range this.Paths { - repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceCIDRList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceCIDRSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CIDRs) > 0 { + for _, s := range m.CIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceCIDRStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," } repeatedStringForPaths += "}" s := strings.Join([]string{`&HTTPIngressRuleValue{`, @@ -1706,6 +2412,43 @@ func (this *HTTPIngressRuleValue) String() string { }, "") return s } +func (this *IPAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), 
"IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]IPAddress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IPAddressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + `}`, + }, "") + return s +} func (this *Ingress) String() string { if this == nil { return "nil" @@ -1900,22 +2643,1172 @@ func (this *IngressTLS) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *ParentReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDR{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ServiceCIDR{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceCIDRList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDRSpec{`, + `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + 
repeatedStringForConditions += "}" + s := strings.Join([]string{`&ServiceCIDRStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx 
:= 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IPAddress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} + } + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressBackend) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err 
:= skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IngressClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { +func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1938,15 +3831,15 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1974,13 +3867,14 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.APIGroup = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1990,28 +3884,27 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + 
intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2039,64 +3932,46 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + s := string(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2106,25 +3981,24 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -2147,7 +4021,7 @@ func (m *HTTPIngressRuleValue) 
Unmarshal(dAtA []byte) error { } return nil } -func (m *Ingress) Unmarshal(dAtA []byte) error { +func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2170,17 +4044,17 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2190,28 +4064,27 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Controller = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2238,40 +4111,10 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.Parameters == nil { + m.Parameters = &IngressClassParametersReference{} } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2296,7 +4139,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressBackend) Unmarshal(dAtA []byte) error { +func (m *IngressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2319,47 +4162,15 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2386,13 +4197,13 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2419,10 +4230,8 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2447,7 +4256,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClass) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2470,17 +4279,17 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2490,28 +4299,59 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + 
msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2538,7 +4378,8 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2563,7 +4404,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassList) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2586,48 +4427,15 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2654,8 +4462,8 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, IngressClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2680,7 +4488,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2703,17 +4511,17 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group") + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var stringLen uint64 + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2723,28 +4531,14 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.APIGroup = &s - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2772,11 +4566,11 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2804,11 +4598,62 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2836,14 +4681,13 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Scope = &s + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2853,24 +4697,24 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Namespace = &s + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -2893,7 +4737,7 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2912,51 +4756,19 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if b < 0x80 { break } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Controller = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2983,10 +4795,10 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parameters == nil { - m.Parameters = &IngressClassParametersReference{} + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3011,7 +4823,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *IngressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3034,15 +4846,15 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3069,13 +4881,16 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3102,66 +4917,16 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 
0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3171,27 +4936,29 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3219,11 +4986,62 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3250,8 +5068,7 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, IngressPortStatus{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3276,7 +5093,7 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { 
} return nil } -func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { +func (m *IngressTLS) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3299,17 +5116,17 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3319,25 +5136,55 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3360,7 +5207,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { +func (m *ParentReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3383,17 +5230,49 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - m.Port = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3403,14 +5282,27 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3438,11 +5330,11 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3470,8 +5362,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Error = &s + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3494,7 +5385,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRule) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3517,17 +5408,17 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3537,27 +5428,28 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3584,63 +5476,13 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3667,10 +5509,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3695,7 +5534,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3718,15 +5557,15 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) 
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3753,50 +5592,13 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backend == nil { - m.Backend = &IngressBackend{} - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3823,44 +5625,11 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ServiceCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3882,7 +5651,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3905,17 +5674,17 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3925,24 +5694,23 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -3965,7 +5733,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3988,17 +5756,17 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4008,55 +5776,25 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index f36df9ec19..3368dcaec3 100644 
--- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -73,6 +73,44 @@ message HTTPIngressRuleValue { repeated HTTPIngressPath paths = 1; } +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +message IPAddress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IPAddressSpec spec = 2; +} + +// IPAddressList contains a list of IPAddress. +message IPAddressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of IPAddresses. + repeated IPAddress items = 2; +} + +// IPAddressSpec describe the attributes in an IP Address. +message IPAddressSpec { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + optional ParentReference parentRef = 1; +} + // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name @@ -81,7 +119,7 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -102,13 +140,13 @@ message IngressBackend { // servicePort Specifies the port of the referenced service. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressClass represents the class of the Ingress, referenced by the Ingress @@ -120,7 +158,7 @@ message IngressClass { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -132,7 +170,7 @@ message IngressClass { message IngressClassList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of IngressClasses. repeated IngressClass items = 2; @@ -186,7 +224,7 @@ message IngressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of Ingress. repeated Ingress items = 2; @@ -351,3 +389,74 @@ message IngressTLS { optional string secretName = 2; } +// ParentReference describes a reference to a parent object. +message ParentReference { + // Group is the group of the object being referenced. + // +optional + optional string group = 1; + + // Resource is the resource of the object being referenced. + // +required + optional string resource = 2; + + // Namespace is the namespace of the object being referenced. + // +optional + optional string namespace = 3; + + // Name is the name of the object being referenced. + // +required + optional string name = 4; +} + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +message ServiceCIDR { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRSpec spec = 2; + + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRStatus status = 3; +} + +// ServiceCIDRList contains a list of ServiceCIDR objects. +message ServiceCIDRList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of ServiceCIDRs. + repeated ServiceCIDR items = 2; +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +message ServiceCIDRSpec { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + // +listType=atomic + repeated string cidrs = 1; +} + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. 
+message ServiceCIDRStatus { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; +} + diff --git a/vendor/k8s.io/api/networking/v1beta1/register.go b/vendor/k8s.io/api/networking/v1beta1/register.go index 04234953e6..9d2a13cc68 100644 --- a/vendor/k8s.io/api/networking/v1beta1/register.go +++ b/vendor/k8s.io/api/networking/v1beta1/register.go @@ -51,6 +51,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &IngressClass{}, &IngressClassList{}, + &IPAddress{}, + &IPAddressList{}, + &ServiceCIDR{}, + &ServiceCIDRList{}, ) // Add the watch version that applies metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 34dfe76aa3..cd7126a5a8 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -218,7 +218,7 @@ type IngressRule struct { // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -421,3 +421,133 @@ type IngressClassList struct { // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +type IPAddress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPAddressSpec describe the attributes in an IP Address. +type IPAddressSpec struct { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` +} + +// ParentReference describes a reference to a parent object. 
+type ParentReference struct { + // Group is the group of the object being referenced. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resource is the resource of the object being referenced. + // +required + Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` + // Namespace is the namespace of the object being referenced. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + // Name is the name of the object being referenced. + // +required + Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// IPAddressList contains a list of IPAddress. +type IPAddressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of IPAddresses. + Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +type ServiceCIDR struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +type ServiceCIDRSpec struct { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + // +listType=atomic + CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` +} + +const ( + // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the + // apiserver to allocate ClusterIPs for Services. + ServiceCIDRConditionReady = "Ready" + // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is + // being deleted. + ServiceCIDRReasonTerminating = "Terminating" +) + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +type ServiceCIDRStatus struct { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. 
+ // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ServiceCIDRList contains a list of ServiceCIDR objects. +type ServiceCIDRList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of ServiceCIDRs. + Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index b2373669fe..9d27517f3b 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -47,6 +47,35 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } +var map_IPAddress = map[string]string{ + "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (IPAddress) SwaggerDoc() map[string]string { + return map_IPAddress +} + +var map_IPAddressList = map[string]string{ + "": "IPAddressList contains a list of IPAddress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of IPAddresses.", +} + +func (IPAddressList) SwaggerDoc() map[string]string { + return map_IPAddressList +} + +var map_IPAddressSpec = map[string]string{ + "": "IPAddressSpec describe the attributes in an IP Address.", + "parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.", +} + +func (IPAddressSpec) SwaggerDoc() map[string]string { + return map_IPAddressSpec +} + var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -201,4 +230,55 @@ func (IngressTLS) SwaggerDoc() map[string]string { return map_IngressTLS } +var map_ParentReference = map[string]string{ + "": "ParentReference describes a reference to a parent object.", + "group": "Group is the group of the object being referenced.", + "resource": "Resource is the resource of the object being referenced.", + "namespace": "Namespace is the namespace of the object being referenced.", + "name": "Name is the name of the object being referenced.", +} + +func (ParentReference) SwaggerDoc() map[string]string { + return map_ParentReference +} + +var map_ServiceCIDR = map[string]string{ + "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ServiceCIDR) SwaggerDoc() map[string]string { + return map_ServiceCIDR +} + +var map_ServiceCIDRList = map[string]string{ + "": "ServiceCIDRList contains a list of ServiceCIDR objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of ServiceCIDRs.", +} + +func (ServiceCIDRList) SwaggerDoc() map[string]string { + return map_ServiceCIDRList +} + +var map_ServiceCIDRSpec = map[string]string{ + "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", + "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", +} + +func (ServiceCIDRSpec) SwaggerDoc() map[string]string { + return map_ServiceCIDRSpec +} + +var map_ServiceCIDRStatus = map[string]string{ + "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state", +} + +func (ServiceCIDRStatus) SwaggerDoc() map[string]string { + return map_ServiceCIDRStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go b/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go new file mode 100644 index 0000000000..bc2207766f --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +const ( + + // TODO: Use IPFamily as field with a field selector,And the value is set based on + // the name at create time and immutable. + // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. + // This label simplify dual-stack client operations allowing to obtain the list of + // IP addresses filtered by family. + LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" + // LabelManagedBy is used to indicate the controller or entity that manages + // an IPAddress. This label aims to enable different IPAddress + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // IPAddress objects. + LabelManagedBy = "ipaddress.kubernetes.io/managed-by" +) diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go index 005d64e7fd..1a6869cd6d 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -23,6 +23,7 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -71,6 +72,87 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddress) DeepCopyInto(out *IPAddress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { + if in == nil { + return nil + } + out := new(IPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { + if in == nil { + return nil + } + out := new(IPAddressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
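An illustrative sketch, not taken from the vendored sources: listing IPAddress objects filtered by the new well-known labels from well_known_labels.go. It assumes client-go v0.31.x exposes a typed NetworkingV1beta1().IPAddresses() client and that the managing controller actually sets ipaddress.kubernetes.io/ip-family; kubeconfig handling is simplified.

package main

import (
	"context"
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// A label selector on LabelIPAddressFamily narrows the list to one family
	// without client-side filtering, which is what the label is meant for.
	list, err := client.NetworkingV1beta1().IPAddresses().List(context.TODO(), metav1.ListOptions{
		LabelSelector: networkingv1beta1.LabelIPAddressFamily + "=IPv6",
	})
	if err != nil {
		panic(err)
	}
	for _, ip := range list.Items {
		fmt.Println(ip.Name, ip.Labels[networkingv1beta1.LabelManagedBy])
	}
}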
+func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { + *out = *in + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { + if in == nil { + return nil + } + out := new(IPAddressSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Ingress) DeepCopyInto(out *Ingress) { *out = *in @@ -448,3 +530,124 @@ func (in *IngressTLS) DeepCopy() *IngressTLS { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. +func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { + if in == nil { + return nil + } + out := new(ServiceCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDR) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCIDR, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. +func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { + if in == nil { + return nil + } + out := new(ServiceCIDRList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { + *out = *in + if in.CIDRs != nil { + in, out := &in.CIDRs, &out.CIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. 
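An illustrative sketch, not taken from the vendored sources: the generated DeepCopy/DeepCopyInto methods above are what make it safe to modify objects that came out of a shared informer cache. The helper name and label key below are hypothetical.

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
)

// withManagedByLabel returns a labeled copy; the cache-owned original is left untouched.
func withManagedByLabel(cached *networkingv1beta1.ServiceCIDR, manager string) *networkingv1beta1.ServiceCIDR {
	updated := cached.DeepCopy() // never mutate cache-owned objects in place
	if updated.Labels == nil {
		updated.Labels = map[string]string{}
	}
	updated.Labels["example.com/managed-by"] = manager // hypothetical label, illustration only
	return updated
}

func main() {
	original := &networkingv1beta1.ServiceCIDR{}
	updated := withManagedByLabel(original, "sketch-controller")
	fmt.Println(len(original.Labels), len(updated.Labels)) // 0 1
}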
+func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { + if in == nil { + return nil + } + out := new(ServiceCIDRSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. +func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { + if in == nil { + return nil + } + out := new(ServiceCIDRStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go index e8b4c7ec7f..a876fd5fe0 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go @@ -25,6 +25,42 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" ) +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
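An illustrative sketch, not taken from the vendored sources: the APILifecycle* methods generated above can drive a simple "this beta API is nearing removal" warning. The interface and version comparison below are assumptions, not a published helper.

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
)

// lifecycleAware matches the method set emitted by prerelease-lifecycle-gen.
type lifecycleAware interface {
	APILifecycleIntroduced() (major, minor int)
	APILifecycleDeprecated() (major, minor int)
	APILifecycleRemoved() (major, minor int)
}

func warnIfDeprecated(obj lifecycleAware, clusterMajor, clusterMinor int) {
	depMajor, depMinor := obj.APILifecycleDeprecated()
	if clusterMajor > depMajor || (clusterMajor == depMajor && clusterMinor >= depMinor) {
		remMajor, remMinor := obj.APILifecycleRemoved()
		fmt.Printf("API deprecated since %d.%d, no longer served from %d.%d\n",
			depMajor, depMinor, remMajor, remMinor)
	}
}

func main() {
	// IPAddress: introduced 1.31, deprecated 1.34, removed 1.37 per the diff.
	warnIfDeprecated(&networkingv1beta1.IPAddress{}, 1, 34)
}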
+func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *Ingress) APILifecycleIntroduced() (major, minor int) { @@ -120,3 +156,39 @@ func (in *IngressList) APILifecycleReplacement() schema.GroupVersionKind { func (in *IngressList) APILifecycleRemoved() (major, minor int) { return 1, 22 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/vendor/k8s.io/api/node/v1/doc.go b/vendor/k8s.io/api/node/v1/doc.go index 12cbcb8a0e..57ca52445b 100644 --- a/vendor/k8s.io/api/node/v1/doc.go +++ b/vendor/k8s.io/api/node/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=node.k8s.io package v1 // import "k8s.io/api/node/v1" diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto index 0152d5e3ab..e6b8852ec1 100644 --- a/vendor/k8s.io/api/node/v1/generated.proto +++ b/vendor/k8s.io/api/node/v1/generated.proto @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1"; message Overhead { // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. @@ -47,7 +47,7 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values @@ -80,7 +80,7 @@ message RuntimeClassList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated RuntimeClass items = 2; @@ -103,6 +103,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go index b00f58772c..169862ea94 100644 --- a/vendor/k8s.io/api/node/v1/types.go +++ b/vendor/k8s.io/api/node/v1/types.go @@ -24,6 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 // RuntimeClass defines a class of container runtime supported in the cluster. // The RuntimeClass is used to determine which container runtime is used to run @@ -93,6 +94,7 @@ type Scheduling struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { diff --git a/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..7497955688 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RuntimeClass) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RuntimeClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index 4673e9261d..bc68718d90 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1alpha1"; message Overhead { // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. @@ -47,7 +47,7 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -59,7 +59,7 @@ message RuntimeClassList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated RuntimeClass items = 2; @@ -113,6 +113,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 54dbc0995a..497027e033 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1beta1"; message Overhead { // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. 
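An illustrative sketch, not taken from the vendored sources: a node/v1 RuntimeClass with the fixed pod overhead (podFixed) described in the proto above, referenced from a Pod via runtimeClassName. The handler name "gvisor" and the image are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1 "k8s.io/api/node/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	rc := nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "sandboxed"},
		Handler:    "gvisor", // assumed CRI handler name
		Overhead: &nodev1.Overhead{
			// podFixed is added on top of container requests when scheduling
			// pods of this class.
			PodFixed: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("120Mi"),
			},
		},
	}

	runtimeClassName := rc.Name
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "sandboxed-pod"},
		Spec: corev1.PodSpec{
			RuntimeClassName: &runtimeClassName,
			Containers:       []corev1.Container{{Name: "app", Image: "registry.example/app:latest"}},
		},
	}
	fmt.Println(*pod.Spec.RuntimeClassName, rc.Overhead.PodFixed.Cpu())
}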
@@ -47,7 +47,7 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values @@ -80,7 +80,7 @@ message RuntimeClassList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated RuntimeClass items = 2; @@ -103,6 +103,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/policy/v1/doc.go b/vendor/k8s.io/api/policy/v1/doc.go index 177cdf5236..c51e02685a 100644 --- a/vendor/k8s.io/api/policy/v1/doc.go +++ b/vendor/k8s.io/api/policy/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto index a79e710284..57128e8112 100644 --- a/vendor/k8s.io/api/policy/v1/generated.proto +++ b/vendor/k8s.io/api/policy/v1/generated.proto @@ -35,11 +35,11 @@ option go_package = "k8s.io/api/policy/v1"; message Eviction { // ObjectMeta describes the pod that is being evicted. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods @@ -47,7 +47,7 @@ message PodDisruptionBudget { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. // +optional @@ -63,7 +63,7 @@ message PodDisruptionBudgetList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of PodDisruptionBudgets repeated PodDisruptionBudget items = 2; @@ -76,7 +76,7 @@ message PodDisruptionBudgetSpec { // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". 
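An illustrative sketch, not taken from the vendored sources: a policy/v1 PodDisruptionBudget expressing the "100%" example from the minAvailable comment, which blocks all voluntary evictions for the selected pods. The selector label is a placeholder.

package main

import (
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	minAvailable := intstr.FromString("100%") // an absolute count also works, e.g. intstr.FromInt32(2)
	pdb := policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "block-evictions", Namespace: "default"},
		Spec: policyv1.PodDisruptionBudgetSpec{
			// minAvailable and maxUnavailable are mutually exclusive settings.
			MinAvailable: &minAvailable,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "payments"}, // hypothetical label
			},
		},
	}
	fmt.Println(pdb.Spec.MinAvailable.String())
}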
// +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. @@ -84,14 +84,14 @@ message PodDisruptionBudgetSpec { // all pods within the namespace. // +patchStrategy=replace // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods // should be considered for eviction. Current implementation considers healthy pods, @@ -142,7 +142,7 @@ message PodDisruptionBudgetStatus { // If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. // +optional - map disruptedPods = 2; + map disruptedPods = 2; // Number of pod disruptions that are currently allowed. optional int32 disruptionsAllowed = 3; @@ -174,6 +174,6 @@ message PodDisruptionBudgetStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; } diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go index 45b9550f4a..f05367ebe4 100644 --- a/vendor/k8s.io/api/policy/v1/types.go +++ b/vendor/k8s.io/api/policy/v1/types.go @@ -170,6 +170,7 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { @@ -188,6 +189,7 @@ type PodDisruptionBudget struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { @@ -203,6 +205,7 @@ type PodDisruptionBudgetList struct { // +genclient // +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.22 // Eviction evicts a pod from its node subject to certain policies and safety constraints. // This is a subresource of Pod. A request to cause such an eviction is diff --git a/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..d6663b9234 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Eviction) APILifecycleIntroduced() (major, minor int) { + return 1, 22 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodDisruptionBudget) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodDisruptionBudgetList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index d1409913f1..91e33f2332 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -35,11 +35,11 @@ option go_package = "k8s.io/api/policy/v1beta1"; message Eviction { // ObjectMeta describes the pod that is being evicted. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods @@ -47,7 +47,7 @@ message PodDisruptionBudget { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. // +optional @@ -63,7 +63,7 @@ message PodDisruptionBudgetList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items list individual PodDisruptionBudget objects repeated PodDisruptionBudget items = 2; @@ -76,7 +76,7 @@ message PodDisruptionBudgetSpec { // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". 
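An illustrative sketch, not taken from the vendored sources: requesting a voluntary eviction through the policy/v1 Eviction subresource, which is how PodDisruptionBudgets are enforced (the apiserver answers 429 when the budget would be violated). It assumes the client-go v0.31.x typed PolicyV1 client; pod and namespace names are placeholders.

package main

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	eviction := &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: "payments-0", Namespace: "default"},
	}
	// Evict goes through the pods/eviction subresource, so PDBs are honored.
	if err := client.PolicyV1().Evictions("default").Evict(context.TODO(), eviction); err != nil {
		panic(err)
	}
}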
// +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. @@ -84,14 +84,14 @@ message PodDisruptionBudgetSpec { // An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. // In policy/v1, an empty selector will select all pods in the namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods // should be considered for eviction. Current implementation considers healthy pods, @@ -142,7 +142,7 @@ message PodDisruptionBudgetStatus { // If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. // +optional - map disruptedPods = 2; + map disruptedPods = 2; // Number of pod disruptions that are currently allowed. optional int32 disruptionsAllowed = 3; @@ -174,6 +174,6 @@ message PodDisruptionBudgetStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; } diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index 80f43ce922..b0e4e5b5b5 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=rbac.authorization.k8s.io package v1 // import "k8s.io/api/rbac/v1" diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 62f5e558ba..87b8f832d3 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -34,14 +34,14 @@ message AggregationRule { // If any of the selectors match, then the ClusterRole's permissions will be added // +optional // +listType=atomic - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional @@ -60,7 +60,7 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional @@ -77,7 +77,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -87,7 +87,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -128,7 +128,7 @@ message PolicyRule { message Role { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional @@ -142,7 +142,7 @@ message Role { message RoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional @@ -159,7 +159,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -169,7 +169,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 8bef1ac462..f9628b8536 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -84,7 +84,7 @@ type Subject struct { // Defaults to "" for ServiceAccount subjects. // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty @@ -106,6 +106,7 @@ type RoleRef struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. type Role struct { @@ -122,6 +123,7 @@ type Role struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleBinding references a role, but does not contain it. 
It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given @@ -144,6 +146,7 @@ type RoleBinding struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleBindingList is a collection of RoleBindings type RoleBindingList struct { @@ -157,6 +160,7 @@ type RoleBindingList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleList is a collection of Roles type RoleList struct { @@ -172,6 +176,7 @@ type RoleList struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. type ClusterRole struct { @@ -204,6 +209,7 @@ type AggregationRule struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. @@ -225,6 +231,7 @@ type ClusterRoleBinding struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { @@ -238,6 +245,7 @@ type ClusterRoleBindingList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleList is a collection of ClusterRoles type ClusterRoleList struct { diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..f6f74413b8 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRole) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
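An illustrative sketch, not taken from the vendored sources: a namespaced rbac/v1 Role and a RoleBinding granting it to a ServiceAccount, using the Subject and RoleRef types shown above. All object names are placeholders.

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	role := rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{Name: "config-reader", Namespace: "default"},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{""}, // "" is the core API group
			Resources: []string{"configmaps"},
			Verbs:     []string{"get", "list", "watch"},
		}},
	}

	binding := rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "config-reader-binding", Namespace: "default"},
		Subjects: []rbacv1.Subject{{
			Kind:      rbacv1.ServiceAccountKind,
			Name:      "app",
			Namespace: "default",
		}},
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName, // "rbac.authorization.k8s.io"
			Kind:     "Role",
			Name:     role.Name,
		},
	}
	fmt.Println(binding.RoleRef.Name)
}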
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Role) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RoleBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RoleBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RoleList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 170e008a56..19d43cdee5 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -34,7 +34,7 @@ message AggregationRule { // If any of the selectors match, then the ClusterRole's permissions will be added // +optional // +listType=atomic - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. @@ -42,7 +42,7 @@ message AggregationRule { message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional @@ -62,7 +62,7 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional @@ -79,7 +79,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -90,7 +90,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -132,7 +132,7 @@ message PolicyRule { message Role { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional @@ -147,7 +147,7 @@ message Role { message RoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional @@ -164,7 +164,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -175,7 +175,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index 9a0a219774..2146b4ce39 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -84,7 +84,7 @@ type Subject struct { // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. // +k8s:conversion-gen=false // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt.name=apiVersion"` + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. 
If the object kind is non-namespace, such as "User" or "Group", and this value is not empty diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 7dfc50d7eb..8bfbd0c8ac 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -34,7 +34,7 @@ message AggregationRule { // If any of the selectors match, then the ClusterRole's permissions will be added // +optional // +listType=atomic - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. @@ -42,7 +42,7 @@ message AggregationRule { message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional @@ -62,7 +62,7 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional @@ -79,7 +79,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -90,7 +90,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -133,7 +133,7 @@ message PolicyRule { message Role { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional @@ -148,7 +148,7 @@ message Role { message RoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional @@ -165,7 +165,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -176,7 +176,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index f761f81a6f..9cfaaceb92 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -84,7 +84,7 @@ type Subject struct { // Defaults to "" for ServiceAccount subjects. // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty diff --git a/vendor/k8s.io/api/resource/v1alpha2/generated.proto b/vendor/k8s.io/api/resource/v1alpha2/generated.proto deleted file mode 100644 index 4a6a5bab6c..0000000000 --- a/vendor/k8s.io/api/resource/v1alpha2/generated.proto +++ /dev/null @@ -1,749 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.api.resource.v1alpha2; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/api/resource/v1alpha2"; - -// AllocationResult contains attributes of an allocated resource. -message AllocationResult { - // ResourceHandles contain the state associated with an allocation that - // should be maintained throughout the lifetime of a claim. Each - // ResourceHandle contains data that should be passed to a specific kubelet - // plugin once it lands on a node. This data is returned by the driver - // after a successful allocation and is opaque to Kubernetes. Driver - // documentation may explain to users how to interpret this data if needed. - // - // Setting this field is optional. It has a maximum size of 32 entries. - // If null (or empty), it is assumed this allocation will be processed by a - // single kubelet plugin with no ResourceHandle data attached. The name of - // the kubelet plugin invoked will match the DriverName set in the - // ResourceClaimStatus this AllocationResult is embedded in. 
- // - // +listType=atomic - // +optional - repeated ResourceHandle resourceHandles = 1; - - // This field will get set by the resource driver after it has allocated - // the resource to inform the scheduler where it can schedule Pods using - // the ResourceClaim. - // - // Setting this field is optional. If null, the resource is available - // everywhere. - // +optional - optional k8s.io.api.core.v1.NodeSelector availableOnNodes = 2; - - // Shareable determines whether the resource supports more - // than one consumer at a time. - // +optional - optional bool shareable = 3; -} - -// AllocationResultModel must have one and only one field set. -message AllocationResultModel { - // NamedResources describes the allocation result when using the named resources model. - // - // +optional - optional NamedResourcesAllocationResult namedResources = 1; -} - -// DriverAllocationResult contains vendor parameters and the allocation result for -// one request. -message DriverAllocationResult { - // VendorRequestParameters are the per-request configuration parameters - // from the time that the claim was allocated. - // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorRequestParameters = 1; - - optional AllocationResultModel allocationResultModel = 2; -} - -// DriverRequests describes all resources that are needed from one particular driver. -message DriverRequests { - // DriverName is the name used by the DRA driver kubelet plugin. - optional string driverName = 1; - - // VendorParameters are arbitrary setup parameters for all requests of the - // claim. They are ignored while allocating the claim. - // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorParameters = 2; - - // Requests describes all resources that are needed from the driver. - // +listType=atomic - repeated ResourceRequest requests = 3; -} - -// NamedResourcesAllocationResult is used in AllocationResultModel. -message NamedResourcesAllocationResult { - // Name is the name of the selected resource instance. - optional string name = 1; -} - -// NamedResourcesAttribute is a combination of an attribute name and its value. -message NamedResourcesAttribute { - // Name is unique identifier among all resource instances managed by - // the driver on the node. It must be a DNS subdomain. - optional string name = 1; - - optional NamedResourcesAttributeValue attributeValue = 2; -} - -// NamedResourcesAttributeValue must have one and only one field set. -message NamedResourcesAttributeValue { - // QuantityValue is a quantity. - optional k8s.io.apimachinery.pkg.api.resource.Quantity quantity = 6; - - // BoolValue is a true/false value. - optional bool bool = 2; - - // IntValue is a 64-bit integer. - optional int64 int = 7; - - // IntSliceValue is an array of 64-bit integers. - optional NamedResourcesIntSlice intSlice = 8; - - // StringValue is a string. - optional string string = 5; - - // StringSliceValue is an array of strings. - optional NamedResourcesStringSlice stringSlice = 9; - - // VersionValue is a semantic version according to semver.org spec 2.0.0. - optional string version = 10; -} - -// NamedResourcesFilter is used in ResourceFilterModel. -message NamedResourcesFilter { - // Selector is a CEL expression which must evaluate to true if a - // resource instance is suitable. 
The language is as defined in - // https://kubernetes.io/docs/reference/using-api/cel/ - // - // In addition, for each type NamedResourcesin AttributeValue there is a map that - // resolves to the corresponding value of the instance under evaluation. - // For example: - // - // attributes.quantity["a"].isGreaterThan(quantity("0")) && - // attributes.stringslice["b"].isSorted() - optional string selector = 1; -} - -// NamedResourcesInstance represents one individual hardware instance that can be selected based -// on its attributes. -message NamedResourcesInstance { - // Name is unique identifier among all resource instances managed by - // the driver on the node. It must be a DNS subdomain. - optional string name = 1; - - // Attributes defines the attributes of this resource instance. - // The name of each attribute must be unique. - // - // +listType=atomic - // +optional - repeated NamedResourcesAttribute attributes = 2; -} - -// NamedResourcesIntSlice contains a slice of 64-bit integers. -message NamedResourcesIntSlice { - // Ints is the slice of 64-bit integers. - // - // +listType=atomic - repeated int64 ints = 1; -} - -// NamedResourcesRequest is used in ResourceRequestModel. -message NamedResourcesRequest { - // Selector is a CEL expression which must evaluate to true if a - // resource instance is suitable. The language is as defined in - // https://kubernetes.io/docs/reference/using-api/cel/ - // - // In addition, for each type NamedResourcesin AttributeValue there is a map that - // resolves to the corresponding value of the instance under evaluation. - // For example: - // - // attributes.quantity["a"].isGreaterThan(quantity("0")) && - // attributes.stringslice["b"].isSorted() - optional string selector = 1; -} - -// NamedResourcesResources is used in ResourceModel. -message NamedResourcesResources { - // The list of all individual resources instances currently available. - // - // +listType=atomic - repeated NamedResourcesInstance instances = 1; -} - -// NamedResourcesStringSlice contains a slice of strings. -message NamedResourcesStringSlice { - // Strings is the slice of strings. - // - // +listType=atomic - repeated string strings = 1; -} - -// PodSchedulingContext objects hold information that is needed to schedule -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation -// mode. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -message PodSchedulingContext { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec describes where resources for the Pod are needed. - optional PodSchedulingContextSpec spec = 2; - - // Status describes where resources for the Pod can be allocated. - // +optional - optional PodSchedulingContextStatus status = 3; -} - -// PodSchedulingContextList is a collection of Pod scheduling objects. -message PodSchedulingContextList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of PodSchedulingContext objects. - repeated PodSchedulingContext items = 2; -} - -// PodSchedulingContextSpec describes where resources for the Pod are needed. -message PodSchedulingContextSpec { - // SelectedNode is the node for which allocation of ResourceClaims that - // are referenced by the Pod and that use "WaitForFirstConsumer" - // allocation is to be attempted. 
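The selector fields of the deleted NamedResourcesFilter and NamedResourcesRequest messages hold CEL expressions evaluated against a per-instance attributes map; helpers such as quantity() and isSorted() in their doc comments come from the Kubernetes CEL environment rather than base CEL. Purely as a sketch of compiling and evaluating a selector-style expression in Go, using the github.com/google/cel-go module already present in this dependency graph and made-up attribute names:

package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare an "attributes" map variable, loosely mirroring the selector
	// environment described in the deleted v1alpha2 doc comments.
	env, err := cel.NewEnv(cel.Variable("attributes", cel.MapType(cel.StringType, cel.DynType)))
	if err != nil {
		log.Fatal(err)
	}
	ast, iss := env.Compile(`attributes["vram"] >= 16 && attributes["vendor"] == "acme"`)
	if iss != nil && iss.Err() != nil {
		log.Fatal(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		log.Fatal(err)
	}
	out, _, err := prg.Eval(map[string]any{
		"attributes": map[string]any{"vram": 24, "vendor": "acme"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Value()) // true
}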
- // +optional - optional string selectedNode = 1; - - // PotentialNodes lists nodes where the Pod might be able to run. - // - // The size of this field is limited to 128. This is large enough for - // many clusters. Larger clusters may need more attempts to find a node - // that suits all pending resources. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - repeated string potentialNodes = 2; -} - -// PodSchedulingContextStatus describes where resources for the Pod can be allocated. -message PodSchedulingContextStatus { - // ResourceClaims describes resource availability for each - // pod.spec.resourceClaim entry where the corresponding ResourceClaim - // uses "WaitForFirstConsumer" allocation mode. - // - // +listType=map - // +listMapKey=name - // +optional - repeated ResourceClaimSchedulingStatus resourceClaims = 1; -} - -// ResourceClaim describes which resources are needed by a resource consumer. -// Its status tracks whether the resource has been allocated and what the -// resulting attributes are. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -message ResourceClaim { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec describes the desired attributes of a resource that then needs - // to be allocated. It can only be set once when creating the - // ResourceClaim. - optional ResourceClaimSpec spec = 2; - - // Status describes whether the resource is available and with which - // attributes. - // +optional - optional ResourceClaimStatus status = 3; -} - -// ResourceClaimConsumerReference contains enough information to let you -// locate the consumer of a ResourceClaim. The user must be a resource in the same -// namespace as the ResourceClaim. -message ResourceClaimConsumerReference { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - optional string apiGroup = 1; - - // Resource is the type of resource being referenced, for example "pods". - optional string resource = 3; - - // Name is the name of resource being referenced. - optional string name = 4; - - // UID identifies exactly one incarnation of the resource. - optional string uid = 5; -} - -// ResourceClaimList is a collection of claims. -message ResourceClaimList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of resource claims. - repeated ResourceClaim items = 2; -} - -// ResourceClaimParameters defines resource requests for a ResourceClaim in an -// in-tree format understood by Kubernetes. -message ResourceClaimParameters { - // Standard object metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // If this object was created from some other resource, then this links - // back to that resource. This field is used to find the in-tree representation - // of the claim parameters when the parameter reference of the claim refers - // to some unknown type. - // +optional - optional ResourceClaimParametersReference generatedFrom = 2; - - // Shareable indicates whether the allocated claim is meant to be shareable - // by multiple consumers at the same time. 
- // +optional - optional bool shareable = 3; - - // DriverRequests describes all resources that are needed for the - // allocated claim. A single claim may use resources coming from - // different drivers. For each driver, this array has at most one - // entry which then may have one or more per-driver requests. - // - // May be empty, in which case the claim can always be allocated. - // - // +listType=atomic - repeated DriverRequests driverRequests = 4; -} - -// ResourceClaimParametersList is a collection of ResourceClaimParameters. -message ResourceClaimParametersList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of node resource capacity objects. - repeated ResourceClaimParameters items = 2; -} - -// ResourceClaimParametersReference contains enough information to let you -// locate the parameters for a ResourceClaim. The object must be in the same -// namespace as the ResourceClaim. -message ResourceClaimParametersReference { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - optional string apiGroup = 1; - - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata, for example "ConfigMap". - optional string kind = 2; - - // Name is the name of resource being referenced. - optional string name = 3; -} - -// ResourceClaimSchedulingStatus contains information about one particular -// ResourceClaim with "WaitForFirstConsumer" allocation mode. -message ResourceClaimSchedulingStatus { - // Name matches the pod.spec.resourceClaims[*].Name field. - // +optional - optional string name = 1; - - // UnsuitableNodes lists nodes that the ResourceClaim cannot be - // allocated for. - // - // The size of this field is limited to 128, the same as for - // PodSchedulingSpec.PotentialNodes. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - repeated string unsuitableNodes = 2; -} - -// ResourceClaimSpec defines how a resource is to be allocated. -message ResourceClaimSpec { - // ResourceClassName references the driver and additional parameters - // via the name of a ResourceClass that was created as part of the - // driver deployment. - optional string resourceClassName = 1; - - // ParametersRef references a separate object with arbitrary parameters - // that will be used by the driver when allocating a resource for the - // claim. - // - // The object must be in the same namespace as the ResourceClaim. - // +optional - optional ResourceClaimParametersReference parametersRef = 2; - - // Allocation can start immediately or when a Pod wants to use the - // resource. "WaitForFirstConsumer" is the default. - // +optional - optional string allocationMode = 3; -} - -// ResourceClaimStatus tracks whether the resource has been allocated and what -// the resulting attributes are. -message ResourceClaimStatus { - // DriverName is a copy of the driver name from the ResourceClass at - // the time when allocation started. - // +optional - optional string driverName = 1; - - // Allocation is set by the resource driver once a resource or set of - // resources has been allocated successfully. If this is not specified, the - // resources have not been allocated yet. 
- // +optional - optional AllocationResult allocation = 2; - - // ReservedFor indicates which entities are currently allowed to use - // the claim. A Pod which references a ResourceClaim which is not - // reserved for that Pod will not be started. - // - // There can be at most 32 such reservations. This may get increased in - // the future, but not reduced. - // - // +listType=map - // +listMapKey=uid - // +patchStrategy=merge - // +patchMergeKey=uid - // +optional - repeated ResourceClaimConsumerReference reservedFor = 3; - - // DeallocationRequested indicates that a ResourceClaim is to be - // deallocated. - // - // The driver then must deallocate this claim and reset the field - // together with clearing the Allocation field. - // - // While DeallocationRequested is set, no new consumers may be added to - // ReservedFor. - // +optional - optional bool deallocationRequested = 4; -} - -// ResourceClaimTemplate is used to produce ResourceClaim objects. -message ResourceClaimTemplate { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Describes the ResourceClaim that is to be generated. - // - // This field is immutable. A ResourceClaim will get created by the - // control plane for a Pod when needed and then not get updated - // anymore. - optional ResourceClaimTemplateSpec spec = 2; -} - -// ResourceClaimTemplateList is a collection of claim templates. -message ResourceClaimTemplateList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of resource claim templates. - repeated ResourceClaimTemplate items = 2; -} - -// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. -message ResourceClaimTemplateSpec { - // ObjectMeta may contain labels and annotations that will be copied into the PVC - // when creating it. No other fields are allowed and will be rejected during - // validation. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec for the ResourceClaim. The entire content is copied unchanged - // into the ResourceClaim that gets created from this template. The - // same fields as in a ResourceClaim are also valid here. - optional ResourceClaimSpec spec = 2; -} - -// ResourceClass is used by administrators to influence how resources -// are allocated. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -message ResourceClass { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // DriverName defines the name of the dynamic resource driver that is - // used for allocation of a ResourceClaim that uses this class. - // - // Resource drivers have a unique name in forward domain order - // (acme.example.com). - optional string driverName = 2; - - // ParametersRef references an arbitrary separate object that may hold - // parameters that will be used by the driver when allocating a - // resource that uses this class. A dynamic resource driver can - // distinguish between parameters stored here and and those stored in - // ResourceClaimSpec. - // +optional - optional ResourceClassParametersReference parametersRef = 3; - - // Only nodes matching the selector will be considered by the scheduler - // when trying to find a Node that fits a Pod when that Pod uses - // a ResourceClaim that has not been allocated yet. 
- // - // Setting this field is optional. If null, all nodes are candidates. - // +optional - optional k8s.io.api.core.v1.NodeSelector suitableNodes = 4; - - // If and only if allocation of claims using this class is handled - // via structured parameters, then StructuredParameters must be set to true. - // +optional - optional bool structuredParameters = 5; -} - -// ResourceClassList is a collection of classes. -message ResourceClassList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of resource classes. - repeated ResourceClass items = 2; -} - -// ResourceClassParameters defines resource requests for a ResourceClass in an -// in-tree format understood by Kubernetes. -message ResourceClassParameters { - // Standard object metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // If this object was created from some other resource, then this links - // back to that resource. This field is used to find the in-tree representation - // of the class parameters when the parameter reference of the class refers - // to some unknown type. - // +optional - optional ResourceClassParametersReference generatedFrom = 2; - - // VendorParameters are arbitrary setup parameters for all claims using - // this class. They are ignored while allocating the claim. There must - // not be more than one entry per driver. - // - // +listType=atomic - // +optional - repeated VendorParameters vendorParameters = 3; - - // Filters describes additional contraints that must be met when using the class. - // - // +listType=atomic - repeated ResourceFilter filters = 4; -} - -// ResourceClassParametersList is a collection of ResourceClassParameters. -message ResourceClassParametersList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of node resource capacity objects. - repeated ResourceClassParameters items = 2; -} - -// ResourceClassParametersReference contains enough information to let you -// locate the parameters for a ResourceClass. -message ResourceClassParametersReference { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - optional string apiGroup = 1; - - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata. - optional string kind = 2; - - // Name is the name of resource being referenced. - optional string name = 3; - - // Namespace that contains the referenced resource. Must be empty - // for cluster-scoped resources and non-empty for namespaced - // resources. - // +optional - optional string namespace = 4; -} - -// ResourceFilter is a filter for resources from one particular driver. -message ResourceFilter { - // DriverName is the name used by the DRA driver kubelet plugin. - optional string driverName = 1; - - optional ResourceFilterModel resourceFilterModel = 2; -} - -// ResourceFilterModel must have one and only one field set. -message ResourceFilterModel { - // NamedResources describes a resource filter using the named resources model. - // - // +optional - optional NamedResourcesFilter namedResources = 1; -} - -// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. 
-message ResourceHandle { - // DriverName specifies the name of the resource driver whose kubelet - // plugin should be invoked to process this ResourceHandle's data once it - // lands on a node. This may differ from the DriverName set in - // ResourceClaimStatus this ResourceHandle is embedded in. - optional string driverName = 1; - - // Data contains the opaque data associated with this ResourceHandle. It is - // set by the controller component of the resource driver whose name - // matches the DriverName set in the ResourceClaimStatus this - // ResourceHandle is embedded in. It is set at allocation time and is - // intended for processing by the kubelet plugin whose name matches - // the DriverName set in this ResourceHandle. - // - // The maximum size of this field is 16KiB. This may get increased in the - // future, but not reduced. - // +optional - optional string data = 2; - - // If StructuredData is set, then it needs to be used instead of Data. - // - // +optional - optional StructuredResourceHandle structuredData = 5; -} - -// ResourceModel must have one and only one field set. -message ResourceModel { - // NamedResources describes available resources using the named resources model. - // - // +optional - optional NamedResourcesResources namedResources = 1; -} - -// ResourceRequest is a request for resources from one particular driver. -message ResourceRequest { - // VendorParameters are arbitrary setup parameters for the requested - // resource. They are ignored while allocating a claim. - // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorParameters = 1; - - optional ResourceRequestModel resourceRequestModel = 2; -} - -// ResourceRequestModel must have one and only one field set. -message ResourceRequestModel { - // NamedResources describes a request for resources with the named resources model. - // - // +optional - optional NamedResourcesRequest namedResources = 1; -} - -// ResourceSlice provides information about available -// resources on individual nodes. -message ResourceSlice { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // NodeName identifies the node which provides the resources - // if they are local to a node. - // - // A field selector can be used to list only ResourceSlice - // objects with a certain node name. - // - // +optional - optional string nodeName = 2; - - // DriverName identifies the DRA driver providing the capacity information. - // A field selector can be used to list only ResourceSlice - // objects with a certain driver name. - optional string driverName = 3; - - optional ResourceModel resourceModel = 4; -} - -// ResourceSliceList is a collection of ResourceSlices. -message ResourceSliceList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of node resource capacity objects. - repeated ResourceSlice items = 2; -} - -// StructuredResourceHandle is the in-tree representation of the allocation result. -message StructuredResourceHandle { - // VendorClassParameters are the per-claim configuration parameters - // from the resource class at the time that the claim was allocated. - // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorClassParameters = 1; - - // VendorClaimParameters are the per-claim configuration parameters - // from the resource claim parameters at the time that the claim was - // allocated. 
- // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorClaimParameters = 2; - - // NodeName is the name of the node providing the necessary resources - // if the resources are local to a node. - // - // +optional - optional string nodeName = 4; - - // Results lists all allocated driver resources. - // - // +listType=atomic - repeated DriverAllocationResult results = 5; -} - -// VendorParameters are opaque parameters for one particular driver. -message VendorParameters { - // DriverName is the name used by the DRA driver kubelet plugin. - optional string driverName = 1; - - // Parameters can be arbitrary setup parameters. They are ignored while - // allocating a claim. - // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2; -} - diff --git a/vendor/k8s.io/api/resource/v1alpha2/namedresources.go b/vendor/k8s.io/api/resource/v1alpha2/namedresources.go deleted file mode 100644 index b80c5c1432..0000000000 --- a/vendor/k8s.io/api/resource/v1alpha2/namedresources.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -import ( - "k8s.io/apimachinery/pkg/api/resource" -) - -// NamedResourcesResources is used in ResourceModel. -type NamedResourcesResources struct { - // The list of all individual resources instances currently available. - // - // +listType=atomic - Instances []NamedResourcesInstance `json:"instances" protobuf:"bytes,1,name=instances"` -} - -// NamedResourcesInstance represents one individual hardware instance that can be selected based -// on its attributes. -type NamedResourcesInstance struct { - // Name is unique identifier among all resource instances managed by - // the driver on the node. It must be a DNS subdomain. - Name string `json:"name" protobuf:"bytes,1,name=name"` - - // Attributes defines the attributes of this resource instance. - // The name of each attribute must be unique. - // - // +listType=atomic - // +optional - Attributes []NamedResourcesAttribute `json:"attributes,omitempty" protobuf:"bytes,2,opt,name=attributes"` -} - -// NamedResourcesAttribute is a combination of an attribute name and its value. -type NamedResourcesAttribute struct { - // Name is unique identifier among all resource instances managed by - // the driver on the node. It must be a DNS subdomain. - Name string `json:"name" protobuf:"bytes,1,name=name"` - - NamedResourcesAttributeValue `json:",inline" protobuf:"bytes,2,opt,name=attributeValue"` -} - -// The Go field names below have a Value suffix to avoid a conflict between the -// field "String" and the corresponding method. That method is required. -// The Kubernetes API is defined without that suffix to keep it more natural. - -// NamedResourcesAttributeValue must have one and only one field set. -type NamedResourcesAttributeValue struct { - // QuantityValue is a quantity. 
- QuantityValue *resource.Quantity `json:"quantity,omitempty" protobuf:"bytes,6,opt,name=quantity"` - // BoolValue is a true/false value. - BoolValue *bool `json:"bool,omitempty" protobuf:"bytes,2,opt,name=bool"` - // IntValue is a 64-bit integer. - IntValue *int64 `json:"int,omitempty" protobuf:"varint,7,opt,name=int"` - // IntSliceValue is an array of 64-bit integers. - IntSliceValue *NamedResourcesIntSlice `json:"intSlice,omitempty" protobuf:"varint,8,rep,name=intSlice"` - // StringValue is a string. - StringValue *string `json:"string,omitempty" protobuf:"bytes,5,opt,name=string"` - // StringSliceValue is an array of strings. - StringSliceValue *NamedResourcesStringSlice `json:"stringSlice,omitempty" protobuf:"bytes,9,rep,name=stringSlice"` - // VersionValue is a semantic version according to semver.org spec 2.0.0. - VersionValue *string `json:"version,omitempty" protobuf:"bytes,10,opt,name=version"` -} - -// NamedResourcesIntSlice contains a slice of 64-bit integers. -type NamedResourcesIntSlice struct { - // Ints is the slice of 64-bit integers. - // - // +listType=atomic - Ints []int64 `json:"ints" protobuf:"bytes,1,opt,name=ints"` -} - -// NamedResourcesStringSlice contains a slice of strings. -type NamedResourcesStringSlice struct { - // Strings is the slice of strings. - // - // +listType=atomic - Strings []string `json:"strings" protobuf:"bytes,1,opt,name=strings"` -} - -// NamedResourcesRequest is used in ResourceRequestModel. -type NamedResourcesRequest struct { - // Selector is a CEL expression which must evaluate to true if a - // resource instance is suitable. The language is as defined in - // https://kubernetes.io/docs/reference/using-api/cel/ - // - // In addition, for each type NamedResourcesin AttributeValue there is a map that - // resolves to the corresponding value of the instance under evaluation. - // For example: - // - // attributes.quantity["a"].isGreaterThan(quantity("0")) && - // attributes.stringslice["b"].isSorted() - Selector string `json:"selector" protobuf:"bytes,1,name=selector"` -} - -// NamedResourcesFilter is used in ResourceFilterModel. -type NamedResourcesFilter struct { - // Selector is a CEL expression which must evaluate to true if a - // resource instance is suitable. The language is as defined in - // https://kubernetes.io/docs/reference/using-api/cel/ - // - // In addition, for each type NamedResourcesin AttributeValue there is a map that - // resolves to the corresponding value of the instance under evaluation. - // For example: - // - // attributes.quantity["a"].isGreaterThan(quantity("0")) && - // attributes.stringslice["b"].isSorted() - Selector string `json:"selector" protobuf:"bytes,1,name=selector"` -} - -// NamedResourcesAllocationResult is used in AllocationResultModel. -type NamedResourcesAllocationResult struct { - // Name is the name of the selected resource instance. - Name string `json:"name" protobuf:"bytes,1,name=name"` -} diff --git a/vendor/k8s.io/api/resource/v1alpha2/types.go b/vendor/k8s.io/api/resource/v1alpha2/types.go deleted file mode 100644 index 9005144cf6..0000000000 --- a/vendor/k8s.io/api/resource/v1alpha2/types.go +++ /dev/null @@ -1,737 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
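The NamedResourcesAttributeValue type shown above follows the union convention used throughout these deleted v1alpha2 types: a struct of optional pointer fields of which exactly one must be set ("must have one and only one field set"). A rough illustration of that pattern with local stand-in types and a trimmed field list; this is not the vendored validation logic:

package main

import "fmt"

// attributeValue is a local stand-in for the union-style value type;
// the real type also carries quantity, slice and version variants.
type attributeValue struct {
	Bool    *bool
	Int     *int64
	String  *string
	Version *string
}

// exactlyOneSet reports whether the union constraint holds.
func exactlyOneSet(v attributeValue) bool {
	n := 0
	for _, set := range []bool{v.Bool != nil, v.Int != nil, v.String != nil, v.Version != nil} {
		if set {
			n++
		}
	}
	return n == 1
}

func main() {
	i := int64(8)
	fmt.Println(exactlyOneSet(attributeValue{Int: &i})) // true
	fmt.Println(exactlyOneSet(attributeValue{}))        // false
}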
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" -) - -const ( - // Finalizer is the finalizer that gets set for claims - // which were allocated through a builtin controller. - Finalizer = "dra.k8s.io/delete-protection" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaim describes which resources are needed by a resource consumer. -// Its status tracks whether the resource has been allocated and what the -// resulting attributes are. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -type ResourceClaim struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec describes the desired attributes of a resource that then needs - // to be allocated. It can only be set once when creating the - // ResourceClaim. - Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` - - // Status describes whether the resource is available and with which - // attributes. - // +optional - Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ResourceClaimSpec defines how a resource is to be allocated. -type ResourceClaimSpec struct { - // ResourceClassName references the driver and additional parameters - // via the name of a ResourceClass that was created as part of the - // driver deployment. - ResourceClassName string `json:"resourceClassName" protobuf:"bytes,1,name=resourceClassName"` - - // ParametersRef references a separate object with arbitrary parameters - // that will be used by the driver when allocating a resource for the - // claim. - // - // The object must be in the same namespace as the ResourceClaim. - // +optional - ParametersRef *ResourceClaimParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,2,opt,name=parametersRef"` - - // Allocation can start immediately or when a Pod wants to use the - // resource. "WaitForFirstConsumer" is the default. - // +optional - AllocationMode AllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,3,opt,name=allocationMode"` -} - -// AllocationMode describes whether a ResourceClaim gets allocated immediately -// when it gets created (AllocationModeImmediate) or whether allocation is -// delayed until it is needed for a Pod -// (AllocationModeWaitForFirstConsumer). Other modes might get added in the -// future. -type AllocationMode string - -const ( - // When a ResourceClaim has AllocationModeWaitForFirstConsumer, allocation is - // delayed until a Pod gets scheduled that needs the ResourceClaim. The - // scheduler will consider all resource requirements of that Pod and - // trigger allocation for a node that fits the Pod. 
- AllocationModeWaitForFirstConsumer AllocationMode = "WaitForFirstConsumer" - - // When a ResourceClaim has AllocationModeImmediate, allocation starts - // as soon as the ResourceClaim gets created. This is done without - // considering the needs of Pods that will use the ResourceClaim - // because those Pods are not known yet. - AllocationModeImmediate AllocationMode = "Immediate" -) - -// ResourceClaimStatus tracks whether the resource has been allocated and what -// the resulting attributes are. -type ResourceClaimStatus struct { - // DriverName is a copy of the driver name from the ResourceClass at - // the time when allocation started. - // +optional - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - // Allocation is set by the resource driver once a resource or set of - // resources has been allocated successfully. If this is not specified, the - // resources have not been allocated yet. - // +optional - Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,2,opt,name=allocation"` - - // ReservedFor indicates which entities are currently allowed to use - // the claim. A Pod which references a ResourceClaim which is not - // reserved for that Pod will not be started. - // - // There can be at most 32 such reservations. This may get increased in - // the future, but not reduced. - // - // +listType=map - // +listMapKey=uid - // +patchStrategy=merge - // +patchMergeKey=uid - // +optional - ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,3,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"` - - // DeallocationRequested indicates that a ResourceClaim is to be - // deallocated. - // - // The driver then must deallocate this claim and reset the field - // together with clearing the Allocation field. - // - // While DeallocationRequested is set, no new consumers may be added to - // ReservedFor. - // +optional - DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"varint,4,opt,name=deallocationRequested"` -} - -// ReservedForMaxSize is the maximum number of entries in -// claim.status.reservedFor. -const ResourceClaimReservedForMaxSize = 32 - -// AllocationResult contains attributes of an allocated resource. -type AllocationResult struct { - // ResourceHandles contain the state associated with an allocation that - // should be maintained throughout the lifetime of a claim. Each - // ResourceHandle contains data that should be passed to a specific kubelet - // plugin once it lands on a node. This data is returned by the driver - // after a successful allocation and is opaque to Kubernetes. Driver - // documentation may explain to users how to interpret this data if needed. - // - // Setting this field is optional. It has a maximum size of 32 entries. - // If null (or empty), it is assumed this allocation will be processed by a - // single kubelet plugin with no ResourceHandle data attached. The name of - // the kubelet plugin invoked will match the DriverName set in the - // ResourceClaimStatus this AllocationResult is embedded in. - // - // +listType=atomic - // +optional - ResourceHandles []ResourceHandle `json:"resourceHandles,omitempty" protobuf:"bytes,1,opt,name=resourceHandles"` - - // This field will get set by the resource driver after it has allocated - // the resource to inform the scheduler where it can schedule Pods using - // the ResourceClaim. - // - // Setting this field is optional. If null, the resource is available - // everywhere. 
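The ResourceClaimStatus shown above caps status.reservedFor at ResourceClaimReservedForMaxSize (32) entries. As a hedged sketch of the bounds check a controller or validation layer might apply, using local stand-in types rather than the removed API:

package main

import "fmt"

// reservedForMaxSize mirrors the limit declared by the deleted v1alpha2 API
// (ResourceClaimReservedForMaxSize = 32); consumerRef is a local stand-in.
const reservedForMaxSize = 32

type consumerRef struct {
	UID string
}

// addReservation appends a consumer only while the cap is not exceeded,
// roughly the check performed before adding an entry to reservedFor.
func addReservation(reservedFor []consumerRef, c consumerRef) ([]consumerRef, error) {
	if len(reservedFor) >= reservedForMaxSize {
		return reservedFor, fmt.Errorf("reservedFor may hold at most %d consumers", reservedForMaxSize)
	}
	return append(reservedFor, c), nil
}

func main() {
	rf, err := addReservation(nil, consumerRef{UID: "7a1c"})
	fmt.Println(len(rf), err) // 1 <nil>
}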
- // +optional - AvailableOnNodes *v1.NodeSelector `json:"availableOnNodes,omitempty" protobuf:"bytes,2,opt,name=availableOnNodes"` - - // Shareable determines whether the resource supports more - // than one consumer at a time. - // +optional - Shareable bool `json:"shareable,omitempty" protobuf:"varint,3,opt,name=shareable"` -} - -// AllocationResultResourceHandlesMaxSize represents the maximum number of -// entries in allocation.resourceHandles. -const AllocationResultResourceHandlesMaxSize = 32 - -// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. -type ResourceHandle struct { - // DriverName specifies the name of the resource driver whose kubelet - // plugin should be invoked to process this ResourceHandle's data once it - // lands on a node. This may differ from the DriverName set in - // ResourceClaimStatus this ResourceHandle is embedded in. - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - // Data contains the opaque data associated with this ResourceHandle. It is - // set by the controller component of the resource driver whose name - // matches the DriverName set in the ResourceClaimStatus this - // ResourceHandle is embedded in. It is set at allocation time and is - // intended for processing by the kubelet plugin whose name matches - // the DriverName set in this ResourceHandle. - // - // The maximum size of this field is 16KiB. This may get increased in the - // future, but not reduced. - // +optional - Data string `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` - - // If StructuredData is set, then it needs to be used instead of Data. - // - // +optional - StructuredData *StructuredResourceHandle `json:"structuredData,omitempty" protobuf:"bytes,5,opt,name=structuredData"` -} - -// ResourceHandleDataMaxSize represents the maximum size of resourceHandle.data. -const ResourceHandleDataMaxSize = 16 * 1024 - -// StructuredResourceHandle is the in-tree representation of the allocation result. -type StructuredResourceHandle struct { - // VendorClassParameters are the per-claim configuration parameters - // from the resource class at the time that the claim was allocated. - // - // +optional - VendorClassParameters runtime.RawExtension `json:"vendorClassParameters,omitempty" protobuf:"bytes,1,opt,name=vendorClassParameters"` - - // VendorClaimParameters are the per-claim configuration parameters - // from the resource claim parameters at the time that the claim was - // allocated. - // - // +optional - VendorClaimParameters runtime.RawExtension `json:"vendorClaimParameters,omitempty" protobuf:"bytes,2,opt,name=vendorClaimParameters"` - - // NodeName is the name of the node providing the necessary resources - // if the resources are local to a node. - // - // +optional - NodeName string `json:"nodeName,omitempty" protobuf:"bytes,4,name=nodeName"` - - // Results lists all allocated driver resources. - // - // +listType=atomic - Results []DriverAllocationResult `json:"results" protobuf:"bytes,5,name=results"` -} - -// DriverAllocationResult contains vendor parameters and the allocation result for -// one request. -type DriverAllocationResult struct { - // VendorRequestParameters are the per-request configuration parameters - // from the time that the claim was allocated. 
- // - // +optional - VendorRequestParameters runtime.RawExtension `json:"vendorRequestParameters,omitempty" protobuf:"bytes,1,opt,name=vendorRequestParameters"` - - AllocationResultModel `json:",inline" protobuf:"bytes,2,name=allocationResultModel"` -} - -// AllocationResultModel must have one and only one field set. -type AllocationResultModel struct { - // NamedResources describes the allocation result when using the named resources model. - // - // +optional - NamedResources *NamedResourcesAllocationResult `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaimList is a collection of claims. -type ResourceClaimList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of resource claims. - Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// PodSchedulingContext objects hold information that is needed to schedule -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation -// mode. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -type PodSchedulingContext struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec describes where resources for the Pod are needed. - Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` - - // Status describes where resources for the Pod can be allocated. - // +optional - Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodSchedulingContextSpec describes where resources for the Pod are needed. -type PodSchedulingContextSpec struct { - // SelectedNode is the node for which allocation of ResourceClaims that - // are referenced by the Pod and that use "WaitForFirstConsumer" - // allocation is to be attempted. - // +optional - SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"` - - // PotentialNodes lists nodes where the Pod might be able to run. - // - // The size of this field is limited to 128. This is large enough for - // many clusters. Larger clusters may need more attempts to find a node - // that suits all pending resources. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` -} - -// PodSchedulingContextStatus describes where resources for the Pod can be allocated. -type PodSchedulingContextStatus struct { - // ResourceClaims describes resource availability for each - // pod.spec.resourceClaim entry where the corresponding ResourceClaim - // uses "WaitForFirstConsumer" allocation mode. - // - // +listType=map - // +listMapKey=name - // +optional - ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"` - - // If there ever is a need to support other kinds of resources - // than ResourceClaim, then new fields could get added here - // for those other resources. 
-} - -// ResourceClaimSchedulingStatus contains information about one particular -// ResourceClaim with "WaitForFirstConsumer" allocation mode. -type ResourceClaimSchedulingStatus struct { - // Name matches the pod.spec.resourceClaims[*].Name field. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - - // UnsuitableNodes lists nodes that the ResourceClaim cannot be - // allocated for. - // - // The size of this field is limited to 128, the same as for - // PodSchedulingSpec.PotentialNodes. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` -} - -// PodSchedulingNodeListMaxSize defines the maximum number of entries in the -// node lists that are stored in PodSchedulingContext objects. This limit is part -// of the API. -const PodSchedulingNodeListMaxSize = 128 - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// PodSchedulingContextList is a collection of Pod scheduling objects. -type PodSchedulingContextList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of PodSchedulingContext objects. - Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClass is used by administrators to influence how resources -// are allocated. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -type ResourceClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // DriverName defines the name of the dynamic resource driver that is - // used for allocation of a ResourceClaim that uses this class. - // - // Resource drivers have a unique name in forward domain order - // (acme.example.com). - DriverName string `json:"driverName" protobuf:"bytes,2,name=driverName"` - - // ParametersRef references an arbitrary separate object that may hold - // parameters that will be used by the driver when allocating a - // resource that uses this class. A dynamic resource driver can - // distinguish between parameters stored here and and those stored in - // ResourceClaimSpec. - // +optional - ParametersRef *ResourceClassParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,3,opt,name=parametersRef"` - - // Only nodes matching the selector will be considered by the scheduler - // when trying to find a Node that fits a Pod when that Pod uses - // a ResourceClaim that has not been allocated yet. - // - // Setting this field is optional. If null, all nodes are candidates. - // +optional - SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,4,opt,name=suitableNodes"` - - // If and only if allocation of claims using this class is handled - // via structured parameters, then StructuredParameters must be set to true. 
- // +optional - StructuredParameters *bool `json:"structuredParameters,omitempty" protobuf:"bytes,5,opt,name=structuredParameters"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClassList is a collection of classes. -type ResourceClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of resource classes. - Items []ResourceClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ResourceClassParametersReference contains enough information to let you -// locate the parameters for a ResourceClass. -type ResourceClassParametersReference struct { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata. - Kind string `json:"kind" protobuf:"bytes,2,name=kind"` - // Name is the name of resource being referenced. - Name string `json:"name" protobuf:"bytes,3,name=name"` - // Namespace that contains the referenced resource. Must be empty - // for cluster-scoped resources and non-empty for namespaced - // resources. - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` -} - -// ResourceClaimParametersReference contains enough information to let you -// locate the parameters for a ResourceClaim. The object must be in the same -// namespace as the ResourceClaim. -type ResourceClaimParametersReference struct { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata, for example "ConfigMap". - Kind string `json:"kind" protobuf:"bytes,2,name=kind"` - // Name is the name of resource being referenced. - Name string `json:"name" protobuf:"bytes,3,name=name"` -} - -// ResourceClaimConsumerReference contains enough information to let you -// locate the consumer of a ResourceClaim. The user must be a resource in the same -// namespace as the ResourceClaim. -type ResourceClaimConsumerReference struct { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` - // Resource is the type of resource being referenced, for example "pods". - Resource string `json:"resource" protobuf:"bytes,3,name=resource"` - // Name is the name of resource being referenced. - Name string `json:"name" protobuf:"bytes,4,name=name"` - // UID identifies exactly one incarnation of the resource. 
- UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaimTemplate is used to produce ResourceClaim objects. -type ResourceClaimTemplate struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Describes the ResourceClaim that is to be generated. - // - // This field is immutable. A ResourceClaim will get created by the - // control plane for a Pod when needed and then not get updated - // anymore. - Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"` -} - -// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. -type ResourceClaimTemplateSpec struct { - // ObjectMeta may contain labels and annotations that will be copied into the PVC - // when creating it. No other fields are allowed and will be rejected during - // validation. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec for the ResourceClaim. The entire content is copied unchanged - // into the ResourceClaim that gets created from this template. The - // same fields as in a ResourceClaim are also valid here. - Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaimTemplateList is a collection of claim templates. -type ResourceClaimTemplateList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of resource claim templates. - Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceSlice provides information about available -// resources on individual nodes. -type ResourceSlice struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // NodeName identifies the node which provides the resources - // if they are local to a node. - // - // A field selector can be used to list only ResourceSlice - // objects with a certain node name. - // - // +optional - NodeName string `json:"nodeName,omitempty" protobuf:"bytes,2,opt,name=nodeName"` - - // DriverName identifies the DRA driver providing the capacity information. - // A field selector can be used to list only ResourceSlice - // objects with a certain driver name. - DriverName string `json:"driverName" protobuf:"bytes,3,name=driverName"` - - ResourceModel `json:",inline" protobuf:"bytes,4,name=resourceModel"` -} - -// ResourceModel must have one and only one field set. -type ResourceModel struct { - // NamedResources describes available resources using the named resources model. 
- // - // +optional - NamedResources *NamedResourcesResources `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceSliceList is a collection of ResourceSlices. -type ResourceSliceList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of node resource capacity objects. - Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceClaimParameters defines resource requests for a ResourceClaim in an -// in-tree format understood by Kubernetes. -type ResourceClaimParameters struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // If this object was created from some other resource, then this links - // back to that resource. This field is used to find the in-tree representation - // of the claim parameters when the parameter reference of the claim refers - // to some unknown type. - // +optional - GeneratedFrom *ResourceClaimParametersReference `json:"generatedFrom,omitempty" protobuf:"bytes,2,opt,name=generatedFrom"` - - // Shareable indicates whether the allocated claim is meant to be shareable - // by multiple consumers at the same time. - // +optional - Shareable bool `json:"shareable,omitempty" protobuf:"bytes,3,opt,name=shareable"` - - // DriverRequests describes all resources that are needed for the - // allocated claim. A single claim may use resources coming from - // different drivers. For each driver, this array has at most one - // entry which then may have one or more per-driver requests. - // - // May be empty, in which case the claim can always be allocated. - // - // +listType=atomic - DriverRequests []DriverRequests `json:"driverRequests,omitempty" protobuf:"bytes,4,opt,name=driverRequests"` -} - -// DriverRequests describes all resources that are needed from one particular driver. -type DriverRequests struct { - // DriverName is the name used by the DRA driver kubelet plugin. - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - // VendorParameters are arbitrary setup parameters for all requests of the - // claim. They are ignored while allocating the claim. - // - // +optional - VendorParameters runtime.RawExtension `json:"vendorParameters,omitempty" protobuf:"bytes,2,opt,name=vendorParameters"` - - // Requests describes all resources that are needed from the driver. - // +listType=atomic - Requests []ResourceRequest `json:"requests,omitempty" protobuf:"bytes,3,opt,name=requests"` -} - -// ResourceRequest is a request for resources from one particular driver. -type ResourceRequest struct { - // VendorParameters are arbitrary setup parameters for the requested - // resource. They are ignored while allocating a claim. - // - // +optional - VendorParameters runtime.RawExtension `json:"vendorParameters,omitempty" protobuf:"bytes,1,opt,name=vendorParameters"` - - ResourceRequestModel `json:",inline" protobuf:"bytes,2,name=resourceRequestModel"` -} - -// ResourceRequestModel must have one and only one field set. 
-type ResourceRequestModel struct { - // NamedResources describes a request for resources with the named resources model. - // - // +optional - NamedResources *NamedResourcesRequest `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceClaimParametersList is a collection of ResourceClaimParameters. -type ResourceClaimParametersList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of node resource capacity objects. - Items []ResourceClaimParameters `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceClassParameters defines resource requests for a ResourceClass in an -// in-tree format understood by Kubernetes. -type ResourceClassParameters struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // If this object was created from some other resource, then this links - // back to that resource. This field is used to find the in-tree representation - // of the class parameters when the parameter reference of the class refers - // to some unknown type. - // +optional - GeneratedFrom *ResourceClassParametersReference `json:"generatedFrom,omitempty" protobuf:"bytes,2,opt,name=generatedFrom"` - - // VendorParameters are arbitrary setup parameters for all claims using - // this class. They are ignored while allocating the claim. There must - // not be more than one entry per driver. - // - // +listType=atomic - // +optional - VendorParameters []VendorParameters `json:"vendorParameters,omitempty" protobuf:"bytes,3,opt,name=vendorParameters"` - - // Filters describes additional contraints that must be met when using the class. - // - // +listType=atomic - Filters []ResourceFilter `json:"filters,omitempty" protobuf:"bytes,4,opt,name=filters"` -} - -// ResourceFilter is a filter for resources from one particular driver. -type ResourceFilter struct { - // DriverName is the name used by the DRA driver kubelet plugin. - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - ResourceFilterModel `json:",inline" protobuf:"bytes,2,name=resourceFilterModel"` -} - -// ResourceFilterModel must have one and only one field set. -type ResourceFilterModel struct { - // NamedResources describes a resource filter using the named resources model. - // - // +optional - NamedResources *NamedResourcesFilter `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceClassParametersList is a collection of ResourceClassParameters. -type ResourceClassParametersList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of node resource capacity objects. - Items []ResourceClassParameters `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// VendorParameters are opaque parameters for one particular driver. 
-type VendorParameters struct { - // DriverName is the name used by the DRA driver kubelet plugin. - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - // Parameters can be arbitrary setup parameters. They are ignored while - // allocating a claim. - // - // +optional - Parameters runtime.RawExtension `json:"parameters,omitempty" protobuf:"bytes,2,opt,name=parameters"` -} diff --git a/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go deleted file mode 100644 index 11f9ffbead..0000000000 --- a/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go +++ /dev/null @@ -1,395 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-codegen.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllocationResult = map[string]string{ - "": "AllocationResult contains attributes of an allocated resource.", - "resourceHandles": "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.", - "availableOnNodes": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. 
If null, the resource is available everywhere.", - "shareable": "Shareable determines whether the resource supports more than one consumer at a time.", -} - -func (AllocationResult) SwaggerDoc() map[string]string { - return map_AllocationResult -} - -var map_AllocationResultModel = map[string]string{ - "": "AllocationResultModel must have one and only one field set.", - "namedResources": "NamedResources describes the allocation result when using the named resources model.", -} - -func (AllocationResultModel) SwaggerDoc() map[string]string { - return map_AllocationResultModel -} - -var map_DriverAllocationResult = map[string]string{ - "": "DriverAllocationResult contains vendor parameters and the allocation result for one request.", - "vendorRequestParameters": "VendorRequestParameters are the per-request configuration parameters from the time that the claim was allocated.", -} - -func (DriverAllocationResult) SwaggerDoc() map[string]string { - return map_DriverAllocationResult -} - -var map_DriverRequests = map[string]string{ - "": "DriverRequests describes all resources that are needed from one particular driver.", - "driverName": "DriverName is the name used by the DRA driver kubelet plugin.", - "vendorParameters": "VendorParameters are arbitrary setup parameters for all requests of the claim. They are ignored while allocating the claim.", - "requests": "Requests describes all resources that are needed from the driver.", -} - -func (DriverRequests) SwaggerDoc() map[string]string { - return map_DriverRequests -} - -var map_PodSchedulingContext = map[string]string{ - "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", - "metadata": "Standard object metadata", - "spec": "Spec describes where resources for the Pod are needed.", - "status": "Status describes where resources for the Pod can be allocated.", -} - -func (PodSchedulingContext) SwaggerDoc() map[string]string { - return map_PodSchedulingContext -} - -var map_PodSchedulingContextList = map[string]string{ - "": "PodSchedulingContextList is a collection of Pod scheduling objects.", - "metadata": "Standard list metadata", - "items": "Items is the list of PodSchedulingContext objects.", -} - -func (PodSchedulingContextList) SwaggerDoc() map[string]string { - return map_PodSchedulingContextList -} - -var map_PodSchedulingContextSpec = map[string]string{ - "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", - "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", - "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. 
This may get increased in the future, but not reduced.", -} - -func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { - return map_PodSchedulingContextSpec -} - -var map_PodSchedulingContextStatus = map[string]string{ - "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", - "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", -} - -func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { - return map_PodSchedulingContextStatus -} - -var map_ResourceClaim = map[string]string{ - "": "ResourceClaim describes which resources are needed by a resource consumer. Its status tracks whether the resource has been allocated and what the resulting attributes are.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", - "metadata": "Standard object metadata", - "spec": "Spec describes the desired attributes of a resource that then needs to be allocated. It can only be set once when creating the ResourceClaim.", - "status": "Status describes whether the resource is available and with which attributes.", -} - -func (ResourceClaim) SwaggerDoc() map[string]string { - return map_ResourceClaim -} - -var map_ResourceClaimConsumerReference = map[string]string{ - "": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", - "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", - "resource": "Resource is the type of resource being referenced, for example \"pods\".", - "name": "Name is the name of resource being referenced.", - "uid": "UID identifies exactly one incarnation of the resource.", -} - -func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string { - return map_ResourceClaimConsumerReference -} - -var map_ResourceClaimList = map[string]string{ - "": "ResourceClaimList is a collection of claims.", - "metadata": "Standard list metadata", - "items": "Items is the list of resource claims.", -} - -func (ResourceClaimList) SwaggerDoc() map[string]string { - return map_ResourceClaimList -} - -var map_ResourceClaimParameters = map[string]string{ - "": "ResourceClaimParameters defines resource requests for a ResourceClaim in an in-tree format understood by Kubernetes.", - "metadata": "Standard object metadata", - "generatedFrom": "If this object was created from some other resource, then this links back to that resource. This field is used to find the in-tree representation of the claim parameters when the parameter reference of the claim refers to some unknown type.", - "shareable": "Shareable indicates whether the allocated claim is meant to be shareable by multiple consumers at the same time.", - "driverRequests": "DriverRequests describes all resources that are needed for the allocated claim. A single claim may use resources coming from different drivers. 
For each driver, this array has at most one entry which then may have one or more per-driver requests.\n\nMay be empty, in which case the claim can always be allocated.", -} - -func (ResourceClaimParameters) SwaggerDoc() map[string]string { - return map_ResourceClaimParameters -} - -var map_ResourceClaimParametersList = map[string]string{ - "": "ResourceClaimParametersList is a collection of ResourceClaimParameters.", - "metadata": "Standard list metadata", - "items": "Items is the list of node resource capacity objects.", -} - -func (ResourceClaimParametersList) SwaggerDoc() map[string]string { - return map_ResourceClaimParametersList -} - -var map_ResourceClaimParametersReference = map[string]string{ - "": "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.", - "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", - "kind": "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \"ConfigMap\".", - "name": "Name is the name of resource being referenced.", -} - -func (ResourceClaimParametersReference) SwaggerDoc() map[string]string { - return map_ResourceClaimParametersReference -} - -var map_ResourceClaimSchedulingStatus = map[string]string{ - "": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", - "name": "Name matches the pod.spec.resourceClaims[*].Name field.", - "unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", -} - -func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string { - return map_ResourceClaimSchedulingStatus -} - -var map_ResourceClaimSpec = map[string]string{ - "": "ResourceClaimSpec defines how a resource is to be allocated.", - "resourceClassName": "ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment.", - "parametersRef": "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim.", - "allocationMode": "Allocation can start immediately or when a Pod wants to use the resource. \"WaitForFirstConsumer\" is the default.", -} - -func (ResourceClaimSpec) SwaggerDoc() map[string]string { - return map_ResourceClaimSpec -} - -var map_ResourceClaimStatus = map[string]string{ - "": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", - "driverName": "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.", - "allocation": "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. If this is not specified, the resources have not been allocated yet.", - "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. 
This may get increased in the future, but not reduced.", - "deallocationRequested": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.", -} - -func (ResourceClaimStatus) SwaggerDoc() map[string]string { - return map_ResourceClaimStatus -} - -var map_ResourceClaimTemplate = map[string]string{ - "": "ResourceClaimTemplate is used to produce ResourceClaim objects.", - "metadata": "Standard object metadata", - "spec": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", -} - -func (ResourceClaimTemplate) SwaggerDoc() map[string]string { - return map_ResourceClaimTemplate -} - -var map_ResourceClaimTemplateList = map[string]string{ - "": "ResourceClaimTemplateList is a collection of claim templates.", - "metadata": "Standard list metadata", - "items": "Items is the list of resource claim templates.", -} - -func (ResourceClaimTemplateList) SwaggerDoc() map[string]string { - return map_ResourceClaimTemplateList -} - -var map_ResourceClaimTemplateSpec = map[string]string{ - "": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", - "metadata": "ObjectMeta may contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", - "spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.", -} - -func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string { - return map_ResourceClaimTemplateSpec -} - -var map_ResourceClass = map[string]string{ - "": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", - "metadata": "Standard object metadata", - "driverName": "DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\n\nResource drivers have a unique name in forward domain order (acme.example.com).", - "parametersRef": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and and those stored in ResourceClaimSpec.", - "suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a ResourceClaim that has not been allocated yet.\n\nSetting this field is optional. 
If null, all nodes are candidates.", - "structuredParameters": "If and only if allocation of claims using this class is handled via structured parameters, then StructuredParameters must be set to true.", -} - -func (ResourceClass) SwaggerDoc() map[string]string { - return map_ResourceClass -} - -var map_ResourceClassList = map[string]string{ - "": "ResourceClassList is a collection of classes.", - "metadata": "Standard list metadata", - "items": "Items is the list of resource classes.", -} - -func (ResourceClassList) SwaggerDoc() map[string]string { - return map_ResourceClassList -} - -var map_ResourceClassParameters = map[string]string{ - "": "ResourceClassParameters defines resource requests for a ResourceClass in an in-tree format understood by Kubernetes.", - "metadata": "Standard object metadata", - "generatedFrom": "If this object was created from some other resource, then this links back to that resource. This field is used to find the in-tree representation of the class parameters when the parameter reference of the class refers to some unknown type.", - "vendorParameters": "VendorParameters are arbitrary setup parameters for all claims using this class. They are ignored while allocating the claim. There must not be more than one entry per driver.", - "filters": "Filters describes additional contraints that must be met when using the class.", -} - -func (ResourceClassParameters) SwaggerDoc() map[string]string { - return map_ResourceClassParameters -} - -var map_ResourceClassParametersList = map[string]string{ - "": "ResourceClassParametersList is a collection of ResourceClassParameters.", - "metadata": "Standard list metadata", - "items": "Items is the list of node resource capacity objects.", -} - -func (ResourceClassParametersList) SwaggerDoc() map[string]string { - return map_ResourceClassParametersList -} - -var map_ResourceClassParametersReference = map[string]string{ - "": "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.", - "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", - "kind": "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.", - "name": "Name is the name of resource being referenced.", - "namespace": "Namespace that contains the referenced resource. 
Must be empty for cluster-scoped resources and non-empty for namespaced resources.", -} - -func (ResourceClassParametersReference) SwaggerDoc() map[string]string { - return map_ResourceClassParametersReference -} - -var map_ResourceFilter = map[string]string{ - "": "ResourceFilter is a filter for resources from one particular driver.", - "driverName": "DriverName is the name used by the DRA driver kubelet plugin.", -} - -func (ResourceFilter) SwaggerDoc() map[string]string { - return map_ResourceFilter -} - -var map_ResourceFilterModel = map[string]string{ - "": "ResourceFilterModel must have one and only one field set.", - "namedResources": "NamedResources describes a resource filter using the named resources model.", -} - -func (ResourceFilterModel) SwaggerDoc() map[string]string { - return map_ResourceFilterModel -} - -var map_ResourceHandle = map[string]string{ - "": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.", - "driverName": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.", - "data": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.", - "structuredData": "If StructuredData is set, then it needs to be used instead of Data.", -} - -func (ResourceHandle) SwaggerDoc() map[string]string { - return map_ResourceHandle -} - -var map_ResourceModel = map[string]string{ - "": "ResourceModel must have one and only one field set.", - "namedResources": "NamedResources describes available resources using the named resources model.", -} - -func (ResourceModel) SwaggerDoc() map[string]string { - return map_ResourceModel -} - -var map_ResourceRequest = map[string]string{ - "": "ResourceRequest is a request for resources from one particular driver.", - "vendorParameters": "VendorParameters are arbitrary setup parameters for the requested resource. They are ignored while allocating a claim.", -} - -func (ResourceRequest) SwaggerDoc() map[string]string { - return map_ResourceRequest -} - -var map_ResourceRequestModel = map[string]string{ - "": "ResourceRequestModel must have one and only one field set.", - "namedResources": "NamedResources describes a request for resources with the named resources model.", -} - -func (ResourceRequestModel) SwaggerDoc() map[string]string { - return map_ResourceRequestModel -} - -var map_ResourceSlice = map[string]string{ - "": "ResourceSlice provides information about available resources on individual nodes.", - "metadata": "Standard object metadata", - "nodeName": "NodeName identifies the node which provides the resources if they are local to a node.\n\nA field selector can be used to list only ResourceSlice objects with a certain node name.", - "driverName": "DriverName identifies the DRA driver providing the capacity information. 
A field selector can be used to list only ResourceSlice objects with a certain driver name.", -} - -func (ResourceSlice) SwaggerDoc() map[string]string { - return map_ResourceSlice -} - -var map_ResourceSliceList = map[string]string{ - "": "ResourceSliceList is a collection of ResourceSlices.", - "metadata": "Standard list metadata", - "items": "Items is the list of node resource capacity objects.", -} - -func (ResourceSliceList) SwaggerDoc() map[string]string { - return map_ResourceSliceList -} - -var map_StructuredResourceHandle = map[string]string{ - "": "StructuredResourceHandle is the in-tree representation of the allocation result.", - "vendorClassParameters": "VendorClassParameters are the per-claim configuration parameters from the resource class at the time that the claim was allocated.", - "vendorClaimParameters": "VendorClaimParameters are the per-claim configuration parameters from the resource claim parameters at the time that the claim was allocated.", - "nodeName": "NodeName is the name of the node providing the necessary resources if the resources are local to a node.", - "results": "Results lists all allocated driver resources.", -} - -func (StructuredResourceHandle) SwaggerDoc() map[string]string { - return map_StructuredResourceHandle -} - -var map_VendorParameters = map[string]string{ - "": "VendorParameters are opaque parameters for one particular driver.", - "driverName": "DriverName is the name used by the DRA driver kubelet plugin.", - "parameters": "Parameters can be arbitrary setup parameters. They are ignored while allocating a claim.", -} - -func (VendorParameters) SwaggerDoc() map[string]string { - return map_VendorParameters -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/resource/v1alpha2/doc.go b/vendor/k8s.io/api/resource/v1alpha3/doc.go similarity index 84% rename from vendor/k8s.io/api/resource/v1alpha2/doc.go rename to vendor/k8s.io/api/resource/v1alpha3/doc.go index d9c20e089d..aeb66561fb 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/doc.go +++ b/vendor/k8s.io/api/resource/v1alpha3/doc.go @@ -20,5 +20,5 @@ limitations under the License. // +groupName=resource.k8s.io -// Package v1alpha2 is the v1alpha2 version of the resource API. -package v1alpha2 // import "k8s.io/api/resource/v1alpha2" +// Package v1alpha3 is the v1alpha3 version of the resource API. +package v1alpha3 // import "k8s.io/api/resource/v1alpha3" diff --git a/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go similarity index 60% rename from vendor/k8s.io/api/resource/v1alpha2/generated.pb.go rename to vendor/k8s.io/api/resource/v1alpha3/generated.pb.go index 6c6ba438e3..4ac01cc6f3 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go @@ -15,9 +15,9 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/api/resource/v1alpha2/generated.proto +// source: k8s.io/api/resource/v1alpha3/generated.proto -package v1alpha2 +package v1alpha3 import ( fmt "fmt" @@ -25,6 +25,7 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" v1 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" @@ -50,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AllocationResult) Reset() { *m = AllocationResult{} } func (*AllocationResult) ProtoMessage() {} func (*AllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{0} + return fileDescriptor_66649ee9bbcd89d2, []int{0} } func (m *AllocationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,15 +76,15 @@ func (m *AllocationResult) XXX_DiscardUnknown() { var xxx_messageInfo_AllocationResult proto.InternalMessageInfo -func (m *AllocationResultModel) Reset() { *m = AllocationResultModel{} } -func (*AllocationResultModel) ProtoMessage() {} -func (*AllocationResultModel) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{1} +func (m *BasicDevice) Reset() { *m = BasicDevice{} } +func (*BasicDevice) ProtoMessage() {} +func (*BasicDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{1} } -func (m *AllocationResultModel) XXX_Unmarshal(b []byte) error { +func (m *BasicDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *AllocationResultModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *BasicDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -91,27 +92,27 @@ func (m *AllocationResultModel) XXX_Marshal(b []byte, deterministic bool) ([]byt } return b[:n], nil } -func (m *AllocationResultModel) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllocationResultModel.Merge(m, src) +func (m *BasicDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicDevice.Merge(m, src) } -func (m *AllocationResultModel) XXX_Size() int { +func (m *BasicDevice) XXX_Size() int { return m.Size() } -func (m *AllocationResultModel) XXX_DiscardUnknown() { - xxx_messageInfo_AllocationResultModel.DiscardUnknown(m) +func (m *BasicDevice) XXX_DiscardUnknown() { + xxx_messageInfo_BasicDevice.DiscardUnknown(m) } -var xxx_messageInfo_AllocationResultModel proto.InternalMessageInfo +var xxx_messageInfo_BasicDevice proto.InternalMessageInfo -func (m *DriverAllocationResult) Reset() { *m = DriverAllocationResult{} } -func (*DriverAllocationResult) ProtoMessage() {} -func (*DriverAllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{2} +func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} } +func (*CELDeviceSelector) ProtoMessage() {} +func (*CELDeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{2} } -func (m *DriverAllocationResult) XXX_Unmarshal(b []byte) error { +func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *DriverAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -119,27 +120,27 @@ func (m *DriverAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]by } return b[:n], 
nil } -func (m *DriverAllocationResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_DriverAllocationResult.Merge(m, src) +func (m *CELDeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_CELDeviceSelector.Merge(m, src) } -func (m *DriverAllocationResult) XXX_Size() int { +func (m *CELDeviceSelector) XXX_Size() int { return m.Size() } -func (m *DriverAllocationResult) XXX_DiscardUnknown() { - xxx_messageInfo_DriverAllocationResult.DiscardUnknown(m) +func (m *CELDeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m) } -var xxx_messageInfo_DriverAllocationResult proto.InternalMessageInfo +var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo -func (m *DriverRequests) Reset() { *m = DriverRequests{} } -func (*DriverRequests) ProtoMessage() {} -func (*DriverRequests) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{3} +func (m *Device) Reset() { *m = Device{} } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{3} } -func (m *DriverRequests) XXX_Unmarshal(b []byte) error { +func (m *Device) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *DriverRequests) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -147,27 +148,27 @@ func (m *DriverRequests) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro } return b[:n], nil } -func (m *DriverRequests) XXX_Merge(src proto.Message) { - xxx_messageInfo_DriverRequests.Merge(m, src) +func (m *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(m, src) } -func (m *DriverRequests) XXX_Size() int { +func (m *Device) XXX_Size() int { return m.Size() } -func (m *DriverRequests) XXX_DiscardUnknown() { - xxx_messageInfo_DriverRequests.DiscardUnknown(m) +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) } -var xxx_messageInfo_DriverRequests proto.InternalMessageInfo +var xxx_messageInfo_Device proto.InternalMessageInfo -func (m *NamedResourcesAllocationResult) Reset() { *m = NamedResourcesAllocationResult{} } -func (*NamedResourcesAllocationResult) ProtoMessage() {} -func (*NamedResourcesAllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{4} +func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} } +func (*DeviceAllocationConfiguration) ProtoMessage() {} +func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{4} } -func (m *NamedResourcesAllocationResult) XXX_Unmarshal(b []byte) error { +func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NamedResourcesAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -175,27 +176,27 @@ func (m *NamedResourcesAllocationResult) XXX_Marshal(b []byte, deterministic boo } return b[:n], nil } -func (m *NamedResourcesAllocationResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesAllocationResult.Merge(m, src) +func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, 
src) } -func (m *NamedResourcesAllocationResult) XXX_Size() int { +func (m *DeviceAllocationConfiguration) XXX_Size() int { return m.Size() } -func (m *NamedResourcesAllocationResult) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesAllocationResult.DiscardUnknown(m) +func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m) } -var xxx_messageInfo_NamedResourcesAllocationResult proto.InternalMessageInfo +var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo -func (m *NamedResourcesAttribute) Reset() { *m = NamedResourcesAttribute{} } -func (*NamedResourcesAttribute) ProtoMessage() {} -func (*NamedResourcesAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{5} +func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} } +func (*DeviceAllocationResult) ProtoMessage() {} +func (*DeviceAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{5} } -func (m *NamedResourcesAttribute) XXX_Unmarshal(b []byte) error { +func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NamedResourcesAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -203,27 +204,27 @@ func (m *NamedResourcesAttribute) XXX_Marshal(b []byte, deterministic bool) ([]b } return b[:n], nil } -func (m *NamedResourcesAttribute) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesAttribute.Merge(m, src) +func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationResult.Merge(m, src) } -func (m *NamedResourcesAttribute) XXX_Size() int { +func (m *DeviceAllocationResult) XXX_Size() int { return m.Size() } -func (m *NamedResourcesAttribute) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesAttribute.DiscardUnknown(m) +func (m *DeviceAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m) } -var xxx_messageInfo_NamedResourcesAttribute proto.InternalMessageInfo +var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo -func (m *NamedResourcesAttributeValue) Reset() { *m = NamedResourcesAttributeValue{} } -func (*NamedResourcesAttributeValue) ProtoMessage() {} -func (*NamedResourcesAttributeValue) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{6} +func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} } +func (*DeviceAttribute) ProtoMessage() {} +func (*DeviceAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{6} } -func (m *NamedResourcesAttributeValue) XXX_Unmarshal(b []byte) error { +func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NamedResourcesAttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -231,27 +232,27 @@ func (m *NamedResourcesAttributeValue) XXX_Marshal(b []byte, deterministic bool) } return b[:n], nil } -func (m *NamedResourcesAttributeValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesAttributeValue.Merge(m, src) +func (m *DeviceAttribute) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_DeviceAttribute.Merge(m, src) } -func (m *NamedResourcesAttributeValue) XXX_Size() int { +func (m *DeviceAttribute) XXX_Size() int { return m.Size() } -func (m *NamedResourcesAttributeValue) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesAttributeValue.DiscardUnknown(m) +func (m *DeviceAttribute) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAttribute.DiscardUnknown(m) } -var xxx_messageInfo_NamedResourcesAttributeValue proto.InternalMessageInfo +var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo -func (m *NamedResourcesFilter) Reset() { *m = NamedResourcesFilter{} } -func (*NamedResourcesFilter) ProtoMessage() {} -func (*NamedResourcesFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{7} +func (m *DeviceClaim) Reset() { *m = DeviceClaim{} } +func (*DeviceClaim) ProtoMessage() {} +func (*DeviceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{7} } -func (m *NamedResourcesFilter) XXX_Unmarshal(b []byte) error { +func (m *DeviceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NamedResourcesFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -259,27 +260,27 @@ func (m *NamedResourcesFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte } return b[:n], nil } -func (m *NamedResourcesFilter) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesFilter.Merge(m, src) +func (m *DeviceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaim.Merge(m, src) } -func (m *NamedResourcesFilter) XXX_Size() int { +func (m *DeviceClaim) XXX_Size() int { return m.Size() } -func (m *NamedResourcesFilter) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesFilter.DiscardUnknown(m) +func (m *DeviceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaim.DiscardUnknown(m) } -var xxx_messageInfo_NamedResourcesFilter proto.InternalMessageInfo +var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo -func (m *NamedResourcesInstance) Reset() { *m = NamedResourcesInstance{} } -func (*NamedResourcesInstance) ProtoMessage() {} -func (*NamedResourcesInstance) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{8} +func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} } +func (*DeviceClaimConfiguration) ProtoMessage() {} +func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{8} } -func (m *NamedResourcesInstance) XXX_Unmarshal(b []byte) error { +func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NamedResourcesInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -287,27 +288,27 @@ func (m *NamedResourcesInstance) XXX_Marshal(b []byte, deterministic bool) ([]by } return b[:n], nil } -func (m *NamedResourcesInstance) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesInstance.Merge(m, src) +func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src) } -func (m *NamedResourcesInstance) XXX_Size() int { +func (m *DeviceClaimConfiguration) XXX_Size() int { return m.Size() } -func (m 
*NamedResourcesInstance) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesInstance.DiscardUnknown(m) +func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m) } -var xxx_messageInfo_NamedResourcesInstance proto.InternalMessageInfo +var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo -func (m *NamedResourcesIntSlice) Reset() { *m = NamedResourcesIntSlice{} } -func (*NamedResourcesIntSlice) ProtoMessage() {} -func (*NamedResourcesIntSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{9} +func (m *DeviceClass) Reset() { *m = DeviceClass{} } +func (*DeviceClass) ProtoMessage() {} +func (*DeviceClass) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{9} } -func (m *NamedResourcesIntSlice) XXX_Unmarshal(b []byte) error { +func (m *DeviceClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NamedResourcesIntSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -315,27 +316,27 @@ func (m *NamedResourcesIntSlice) XXX_Marshal(b []byte, deterministic bool) ([]by } return b[:n], nil } -func (m *NamedResourcesIntSlice) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesIntSlice.Merge(m, src) +func (m *DeviceClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClass.Merge(m, src) } -func (m *NamedResourcesIntSlice) XXX_Size() int { +func (m *DeviceClass) XXX_Size() int { return m.Size() } -func (m *NamedResourcesIntSlice) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesIntSlice.DiscardUnknown(m) +func (m *DeviceClass) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClass.DiscardUnknown(m) } -var xxx_messageInfo_NamedResourcesIntSlice proto.InternalMessageInfo +var xxx_messageInfo_DeviceClass proto.InternalMessageInfo -func (m *NamedResourcesRequest) Reset() { *m = NamedResourcesRequest{} } -func (*NamedResourcesRequest) ProtoMessage() {} -func (*NamedResourcesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{10} +func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} } +func (*DeviceClassConfiguration) ProtoMessage() {} +func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{10} } -func (m *NamedResourcesRequest) XXX_Unmarshal(b []byte) error { +func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NamedResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -343,27 +344,27 @@ func (m *NamedResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byt } return b[:n], nil } -func (m *NamedResourcesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesRequest.Merge(m, src) +func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassConfiguration.Merge(m, src) } -func (m *NamedResourcesRequest) XXX_Size() int { +func (m *DeviceClassConfiguration) XXX_Size() int { return m.Size() } -func (m *NamedResourcesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesRequest.DiscardUnknown(m) +func (m *DeviceClassConfiguration) XXX_DiscardUnknown() 
{ + xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m) } -var xxx_messageInfo_NamedResourcesRequest proto.InternalMessageInfo +var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo -func (m *NamedResourcesResources) Reset() { *m = NamedResourcesResources{} } -func (*NamedResourcesResources) ProtoMessage() {} -func (*NamedResourcesResources) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{11} +func (m *DeviceClassList) Reset() { *m = DeviceClassList{} } +func (*DeviceClassList) ProtoMessage() {} +func (*DeviceClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{11} } -func (m *NamedResourcesResources) XXX_Unmarshal(b []byte) error { +func (m *DeviceClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NamedResourcesResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -371,27 +372,27 @@ func (m *NamedResourcesResources) XXX_Marshal(b []byte, deterministic bool) ([]b } return b[:n], nil } -func (m *NamedResourcesResources) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesResources.Merge(m, src) +func (m *DeviceClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassList.Merge(m, src) } -func (m *NamedResourcesResources) XXX_Size() int { +func (m *DeviceClassList) XXX_Size() int { return m.Size() } -func (m *NamedResourcesResources) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesResources.DiscardUnknown(m) +func (m *DeviceClassList) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassList.DiscardUnknown(m) } -var xxx_messageInfo_NamedResourcesResources proto.InternalMessageInfo +var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo -func (m *NamedResourcesStringSlice) Reset() { *m = NamedResourcesStringSlice{} } -func (*NamedResourcesStringSlice) ProtoMessage() {} -func (*NamedResourcesStringSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{12} +func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} } +func (*DeviceClassSpec) ProtoMessage() {} +func (*DeviceClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{12} } -func (m *NamedResourcesStringSlice) XXX_Unmarshal(b []byte) error { +func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NamedResourcesStringSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -399,22 +400,190 @@ func (m *NamedResourcesStringSlice) XXX_Marshal(b []byte, deterministic bool) ([ } return b[:n], nil } -func (m *NamedResourcesStringSlice) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesStringSlice.Merge(m, src) +func (m *DeviceClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassSpec.Merge(m, src) } -func (m *NamedResourcesStringSlice) XXX_Size() int { +func (m *DeviceClassSpec) XXX_Size() int { return m.Size() } -func (m *NamedResourcesStringSlice) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesStringSlice.DiscardUnknown(m) +func (m *DeviceClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m) } -var xxx_messageInfo_NamedResourcesStringSlice proto.InternalMessageInfo +var 
xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo + +func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} } +func (*DeviceConfiguration) ProtoMessage() {} +func (*DeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{13} +} +func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConfiguration.Merge(m, src) +} +func (m *DeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo + +func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} } +func (*DeviceConstraint) ProtoMessage() {} +func (*DeviceConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{14} +} +func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConstraint.Merge(m, src) +} +func (m *DeviceConstraint) XXX_Size() int { + return m.Size() +} +func (m *DeviceConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo + +func (m *DeviceRequest) Reset() { *m = DeviceRequest{} } +func (*DeviceRequest) ProtoMessage() {} +func (*DeviceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{15} +} +func (m *DeviceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequest.Merge(m, src) +} +func (m *DeviceRequest) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo + +func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} } +func (*DeviceRequestAllocationResult) ProtoMessage() {} +func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{16} +} +func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src) +} +func (m *DeviceRequestAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() { + 
xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo + +func (m *DeviceSelector) Reset() { *m = DeviceSelector{} } +func (*DeviceSelector) ProtoMessage() {} +func (*DeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{17} +} +func (m *DeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSelector.Merge(m, src) +} +func (m *DeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *DeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo + +func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} } +func (*OpaqueDeviceConfiguration) ProtoMessage() {} +func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{18} +} +func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src) +} +func (m *OpaqueDeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} } func (*PodSchedulingContext) ProtoMessage() {} func (*PodSchedulingContext) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{13} + return fileDescriptor_66649ee9bbcd89d2, []int{19} } func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +611,7 @@ var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} } func (*PodSchedulingContextList) ProtoMessage() {} func (*PodSchedulingContextList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{14} + return fileDescriptor_66649ee9bbcd89d2, []int{20} } func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +639,7 @@ var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} } func (*PodSchedulingContextSpec) ProtoMessage() {} func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{15} + return fileDescriptor_66649ee9bbcd89d2, []int{21} } func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +667,7 @@ var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} } func (*PodSchedulingContextStatus) ProtoMessage() {} func (*PodSchedulingContextStatus) 
Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{16} + return fileDescriptor_66649ee9bbcd89d2, []int{22} } func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +695,7 @@ var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{17} + return fileDescriptor_66649ee9bbcd89d2, []int{23} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +723,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } func (*ResourceClaimConsumerReference) ProtoMessage() {} func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{18} + return fileDescriptor_66649ee9bbcd89d2, []int{24} } func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -582,7 +751,7 @@ var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } func (*ResourceClaimList) ProtoMessage() {} func (*ResourceClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{19} + return fileDescriptor_66649ee9bbcd89d2, []int{25} } func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -607,94 +776,10 @@ func (m *ResourceClaimList) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo -func (m *ResourceClaimParameters) Reset() { *m = ResourceClaimParameters{} } -func (*ResourceClaimParameters) ProtoMessage() {} -func (*ResourceClaimParameters) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{20} -} -func (m *ResourceClaimParameters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimParameters) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimParameters.Merge(m, src) -} -func (m *ResourceClaimParameters) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimParameters) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimParameters.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimParameters proto.InternalMessageInfo - -func (m *ResourceClaimParametersList) Reset() { *m = ResourceClaimParametersList{} } -func (*ResourceClaimParametersList) ProtoMessage() {} -func (*ResourceClaimParametersList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{21} -} -func (m *ResourceClaimParametersList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimParametersList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimParametersList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimParametersList.Merge(m, src) -} -func (m *ResourceClaimParametersList) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimParametersList) XXX_DiscardUnknown() { - 
xxx_messageInfo_ResourceClaimParametersList.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimParametersList proto.InternalMessageInfo - -func (m *ResourceClaimParametersReference) Reset() { *m = ResourceClaimParametersReference{} } -func (*ResourceClaimParametersReference) ProtoMessage() {} -func (*ResourceClaimParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{22} -} -func (m *ResourceClaimParametersReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimParametersReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimParametersReference.Merge(m, src) -} -func (m *ResourceClaimParametersReference) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimParametersReference) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimParametersReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimParametersReference proto.InternalMessageInfo - func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } func (*ResourceClaimSchedulingStatus) ProtoMessage() {} func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{23} + return fileDescriptor_66649ee9bbcd89d2, []int{26} } func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -722,7 +807,7 @@ var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } func (*ResourceClaimSpec) ProtoMessage() {} func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{24} + return fileDescriptor_66649ee9bbcd89d2, []int{27} } func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -750,7 +835,7 @@ var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } func (*ResourceClaimStatus) ProtoMessage() {} func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{25} + return fileDescriptor_66649ee9bbcd89d2, []int{28} } func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -778,7 +863,7 @@ var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } func (*ResourceClaimTemplate) ProtoMessage() {} func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{26} + return fileDescriptor_66649ee9bbcd89d2, []int{29} } func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -806,7 +891,7 @@ var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } func (*ResourceClaimTemplateList) ProtoMessage() {} func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{27} + return fileDescriptor_66649ee9bbcd89d2, []int{30} } func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -834,7 +919,7 @@ var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo func (m 
*ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } func (*ResourceClaimTemplateSpec) ProtoMessage() {} func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{28} + return fileDescriptor_66649ee9bbcd89d2, []int{31} } func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -859,15 +944,15 @@ func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo -func (m *ResourceClass) Reset() { *m = ResourceClass{} } -func (*ResourceClass) ProtoMessage() {} -func (*ResourceClass) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{29} +func (m *ResourcePool) Reset() { *m = ResourcePool{} } +func (*ResourcePool) ProtoMessage() {} +func (*ResourcePool) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{32} } -func (m *ResourceClass) XXX_Unmarshal(b []byte) error { +func (m *ResourcePool) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResourceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -875,27 +960,27 @@ func (m *ResourceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *ResourceClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClass.Merge(m, src) +func (m *ResourcePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePool.Merge(m, src) } -func (m *ResourceClass) XXX_Size() int { +func (m *ResourcePool) XXX_Size() int { return m.Size() } -func (m *ResourceClass) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClass.DiscardUnknown(m) +func (m *ResourcePool) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePool.DiscardUnknown(m) } -var xxx_messageInfo_ResourceClass proto.InternalMessageInfo +var xxx_messageInfo_ResourcePool proto.InternalMessageInfo -func (m *ResourceClassList) Reset() { *m = ResourceClassList{} } -func (*ResourceClassList) ProtoMessage() {} -func (*ResourceClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{30} +func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } +func (*ResourceSlice) ProtoMessage() {} +func (*ResourceSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{33} } -func (m *ResourceClassList) XXX_Unmarshal(b []byte) error { +func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResourceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -903,27 +988,27 @@ func (m *ResourceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, e } return b[:n], nil } -func (m *ResourceClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClassList.Merge(m, src) +func (m *ResourceSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSlice.Merge(m, src) } -func (m *ResourceClassList) XXX_Size() int { +func (m *ResourceSlice) XXX_Size() int { return m.Size() } -func (m *ResourceClassList) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClassList.DiscardUnknown(m) +func (m *ResourceSlice) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSlice.DiscardUnknown(m) } -var 
xxx_messageInfo_ResourceClassList proto.InternalMessageInfo +var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo -func (m *ResourceClassParameters) Reset() { *m = ResourceClassParameters{} } -func (*ResourceClassParameters) ProtoMessage() {} -func (*ResourceClassParameters) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{31} +func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } +func (*ResourceSliceList) ProtoMessage() {} +func (*ResourceSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{34} } -func (m *ResourceClassParameters) XXX_Unmarshal(b []byte) error { +func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResourceClassParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -931,27 +1016,27 @@ func (m *ResourceClassParameters) XXX_Marshal(b []byte, deterministic bool) ([]b } return b[:n], nil } -func (m *ResourceClassParameters) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClassParameters.Merge(m, src) +func (m *ResourceSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceList.Merge(m, src) } -func (m *ResourceClassParameters) XXX_Size() int { +func (m *ResourceSliceList) XXX_Size() int { return m.Size() } -func (m *ResourceClassParameters) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClassParameters.DiscardUnknown(m) +func (m *ResourceSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceList.DiscardUnknown(m) } -var xxx_messageInfo_ResourceClassParameters proto.InternalMessageInfo +var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo -func (m *ResourceClassParametersList) Reset() { *m = ResourceClassParametersList{} } -func (*ResourceClassParametersList) ProtoMessage() {} -func (*ResourceClassParametersList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{32} +func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} } +func (*ResourceSliceSpec) ProtoMessage() {} +func (*ResourceSliceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{35} } -func (m *ResourceClassParametersList) XXX_Unmarshal(b []byte) error { +func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResourceClassParametersList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -959,523 +1044,330 @@ func (m *ResourceClassParametersList) XXX_Marshal(b []byte, deterministic bool) } return b[:n], nil } -func (m *ResourceClassParametersList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClassParametersList.Merge(m, src) +func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceSpec.Merge(m, src) } -func (m *ResourceClassParametersList) XXX_Size() int { +func (m *ResourceSliceSpec) XXX_Size() int { return m.Size() } -func (m *ResourceClassParametersList) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClassParametersList.DiscardUnknown(m) +func (m *ResourceSliceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m) } -var xxx_messageInfo_ResourceClassParametersList proto.InternalMessageInfo +var 
xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo -func (m *ResourceClassParametersReference) Reset() { *m = ResourceClassParametersReference{} } -func (*ResourceClassParametersReference) ProtoMessage() {} -func (*ResourceClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{33} -} -func (m *ResourceClassParametersReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClassParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClassParametersReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClassParametersReference.Merge(m, src) -} -func (m *ResourceClassParametersReference) XXX_Size() int { - return m.Size() -} -func (m *ResourceClassParametersReference) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClassParametersReference.DiscardUnknown(m) +func init() { + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha3.AllocationResult") + proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice") + proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.AttributesEntry") + proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.CapacityEntry") + proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.CELDeviceSelector") + proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1alpha3.Device") + proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationConfiguration") + proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationResult") + proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.DeviceAttribute") + proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaim") + proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaimConfiguration") + proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1alpha3.DeviceClass") + proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassConfiguration") + proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassList") + proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassSpec") + proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceConfiguration") + proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1alpha3.DeviceConstraint") + proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequest") + proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequestAllocationResult") + proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector") + proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.OpaqueDeviceConfiguration") + proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContext") + proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextList") + proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextSpec") + 
proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextStatus") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimList") + proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSchedulingStatus") + proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1alpha3.ResourcePool") + proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1alpha3.ResourceSlice") + proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceList") + proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceSpec") } -var xxx_messageInfo_ResourceClassParametersReference proto.InternalMessageInfo - -func (m *ResourceFilter) Reset() { *m = ResourceFilter{} } -func (*ResourceFilter) ProtoMessage() {} -func (*ResourceFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{34} -} -func (m *ResourceFilter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) +func init() { + proto.RegisterFile("k8s.io/api/resource/v1alpha3/generated.proto", fileDescriptor_66649ee9bbcd89d2) +} + +var fileDescriptor_66649ee9bbcd89d2 = []byte{ + // 2085 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0xcd, 0x6f, 0x1c, 0x57, + 0x3d, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda, + 0x74, 0xb6, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26, + 0x34, 0x50, 0x4a, 0x9f, 0x67, 0x9f, 0xed, 0xc1, 0xb3, 0x33, 0xd3, 0x99, 0x37, 0x26, 0x16, 0x12, + 0x8a, 0xb8, 0x70, 0x8b, 0x7a, 0xe5, 0x80, 0xb8, 0x21, 0x21, 0x0e, 0x70, 0xe0, 0x88, 0x54, 0x24, + 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b, + 0x4f, 0xcf, 0x6c, 0x66, 0xa3, 0xca, 0x2a, 0x37, 0xcf, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xc3, + 0xa5, 0x83, 0x2b, 0xbe, 0x66, 0x3a, 0x2d, 0xe2, 0x9a, 0x2d, 0x8f, 0xfa, 0x4e, 0xe0, 0x19, 0xb4, + 0x75, 0x78, 0x99, 0x58, 0xee, 0x3e, 0x79, 0xbd, 0xb5, 0x47, 0x6d, 0xea, 0x11, 0x46, 0x3b, 0x9a, + 0xeb, 0x39, 0xcc, 0x41, 0x2f, 0x86, 0xd4, 0x1a, 0x71, 0x4d, 0x2d, 0xa2, 0xd6, 0x22, 0xea, 0xe5, + 0x57, 0xf7, 0x4c, 0xb6, 0x1f, 0xec, 0x68, 0x86, 0xd3, 0x6d, 0xed, 0x39, 0x7b, 0x4e, 0x4b, 0x30, + 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x42, 0x61, 0xcb, 0x6a, 0x4a, 0xb5, 0xe1, 0x78, + 0x5c, 0x6d, 0x5e, 0xe1, 0xf2, 0x1b, 0x09, 0x4d, 0x97, 0x18, 0xfb, 0xa6, 0x4d, 0xbd, 0xa3, 0x96, + 0x7b, 0xb0, 0x97, 0xb5, 0x77, 0x18, 0x2e, 0xbf, 0xd5, 0xa5, 0x8c, 0x14, 0xe9, 0x6a, 0x95, 0x71, + 0x79, 0x81, 0xcd, 0xcc, 0xee, 0x71, 0x35, 0x6f, 0x3e, 0x8b, 0xc1, 0x37, 0xf6, 0x69, 0x97, 0xe4, + 0xf9, 0xd4, 0xff, 0x2a, 0x30, 
0xbf, 0x66, 0x59, 0x8e, 0x41, 0x98, 0xe9, 0xd8, 0x98, 0xfa, 0x81, + 0xc5, 0xd0, 0x8f, 0x60, 0xbc, 0x43, 0x0f, 0x4d, 0x83, 0xfa, 0x4b, 0xca, 0x79, 0x65, 0x65, 0x6a, + 0xf5, 0x0d, 0x6d, 0x90, 0xb3, 0xb5, 0x0d, 0x41, 0x9c, 0x17, 0xa3, 0xcf, 0x3d, 0xea, 0x35, 0x4f, + 0xf5, 0x7b, 0xcd, 0xf1, 0x10, 0xef, 0xe3, 0x48, 0x2a, 0xba, 0x0b, 0xd3, 0xb6, 0xd3, 0xa1, 0x6d, + 0x6a, 0x51, 0x83, 0x39, 0xde, 0x52, 0x5d, 0x68, 0x39, 0x9f, 0xd6, 0xc2, 0xa3, 0xa0, 0x1d, 0x5e, + 0xd6, 0x6e, 0xa6, 0xe8, 0xf4, 0xf9, 0x7e, 0xaf, 0x39, 0x9d, 0x86, 0xe0, 0x8c, 0x1c, 0xb4, 0x0a, + 0x60, 0x38, 0x36, 0xf3, 0x1c, 0xcb, 0xa2, 0xde, 0xd2, 0xc8, 0x79, 0x65, 0x65, 0x52, 0x47, 0xd2, + 0x0a, 0x58, 0x8f, 0x31, 0x38, 0x45, 0xa5, 0x7e, 0x5e, 0x87, 0x29, 0x9d, 0xf8, 0xa6, 0x11, 0x5a, + 0x89, 0x7e, 0x06, 0x40, 0x18, 0xf3, 0xcc, 0x9d, 0x80, 0x89, 0xf3, 0xd7, 0x57, 0xa6, 0x56, 0xbf, + 0x35, 0xf8, 0xfc, 0x29, 0x76, 0x6d, 0x2d, 0xe6, 0xdd, 0xb4, 0x99, 0x77, 0xa4, 0xbf, 0x14, 0xa9, + 0x4f, 0x10, 0x3f, 0xff, 0x57, 0x73, 0xe6, 0x76, 0x40, 0x2c, 0x73, 0xd7, 0xa4, 0x9d, 0x9b, 0xa4, + 0x4b, 0x71, 0x4a, 0x23, 0x3a, 0x84, 0x09, 0x83, 0xb8, 0xc4, 0x30, 0xd9, 0xd1, 0x52, 0x4d, 0x68, + 0x7f, 0xab, 0xba, 0xf6, 0x75, 0xc9, 0x19, 0xea, 0xbe, 0x20, 0x75, 0x4f, 0x44, 0xe0, 0xe3, 0x9a, + 0x63, 0x5d, 0xcb, 0x16, 0xcc, 0xe5, 0x6c, 0x47, 0xf3, 0x50, 0x3f, 0xa0, 0x47, 0x22, 0x07, 0x26, + 0x31, 0xff, 0x13, 0xad, 0xc3, 0xe8, 0x21, 0xb1, 0x02, 0xba, 0x54, 0x13, 0x11, 0x7b, 0xb5, 0x52, + 0x5e, 0x44, 0x52, 0x71, 0xc8, 0xfb, 0x76, 0xed, 0x8a, 0xb2, 0x7c, 0x00, 0x33, 0x19, 0x5b, 0x0b, + 0x74, 0x6d, 0x64, 0x75, 0x69, 0x29, 0x5d, 0x71, 0x8a, 0x6b, 0xee, 0xc1, 0x5e, 0x56, 0xf9, 0xed, + 0x80, 0xd8, 0xcc, 0x64, 0x47, 0x29, 0x65, 0xea, 0x55, 0x58, 0x58, 0xdf, 0xbc, 0x1e, 0x5a, 0x93, + 0xce, 0x15, 0x7a, 0xdf, 0xf5, 0xa8, 0xef, 0x9b, 0x8e, 0x1d, 0xea, 0x4d, 0x72, 0x65, 0x33, 0xc6, + 0xe0, 0x14, 0x95, 0x7a, 0x08, 0x63, 0x32, 0x4b, 0xce, 0xc3, 0x88, 0x4d, 0xba, 0x54, 0xf2, 0x4d, + 0x4b, 0xbe, 0x11, 0xe1, 0x53, 0x81, 0x41, 0xd7, 0x60, 0x74, 0x87, 0x47, 0x46, 0x9a, 0x7f, 0xb1, + 0x72, 0x10, 0xf5, 0xc9, 0x7e, 0xaf, 0x39, 0x2a, 0x00, 0x38, 0x14, 0xa1, 0x3e, 0xac, 0xc1, 0xb9, + 0x7c, 0x91, 0xad, 0x3b, 0xf6, 0xae, 0xb9, 0x17, 0x78, 0xe2, 0x03, 0x7d, 0x17, 0xc6, 0x42, 0x91, + 0xd2, 0xa2, 0x15, 0x69, 0xd1, 0x58, 0x5b, 0x40, 0x9f, 0xf6, 0x9a, 0x67, 0xf3, 0xac, 0x21, 0x06, + 0x4b, 0x3e, 0xb4, 0x02, 0x13, 0x1e, 0xfd, 0x38, 0xa0, 0x3e, 0xf3, 0x45, 0xde, 0x4d, 0xea, 0xd3, + 0x3c, 0x75, 0xb0, 0x84, 0xe1, 0x18, 0x8b, 0x1e, 0x28, 0xb0, 0x18, 0x56, 0x72, 0xc6, 0x06, 0x59, + 0xc5, 0x97, 0xab, 0xe4, 0x44, 0x86, 0x51, 0xff, 0xaa, 0x34, 0x76, 0xb1, 0x00, 0x89, 0x8b, 0x54, + 0xa9, 0xff, 0x51, 0xe0, 0x6c, 0x71, 0xd7, 0x41, 0xbb, 0x30, 0xee, 0x89, 0xbf, 0xa2, 0xe2, 0x7d, + 0xa7, 0x8a, 0x41, 0xf2, 0x98, 0xe5, 0x3d, 0x2c, 0xfc, 0xf6, 0x71, 0x24, 0x1c, 0x19, 0x30, 0x66, + 0x08, 0x9b, 0x64, 0x95, 0xbe, 0x33, 0x5c, 0x8f, 0xcc, 0x7a, 0x60, 0x36, 0x0a, 0x57, 0x08, 0xc6, + 0x52, 0xb4, 0xfa, 0x5b, 0x05, 0xe6, 0x72, 0x55, 0x84, 0x1a, 0x50, 0x37, 0x6d, 0x26, 0xd2, 0xaa, + 0x1e, 0xc6, 0x68, 0xcb, 0x66, 0x77, 0x79, 0xb2, 0x63, 0x8e, 0x40, 0x17, 0x60, 0x64, 0xc7, 0x71, + 0x2c, 0x11, 0x8e, 0x09, 0x7d, 0xa6, 0xdf, 0x6b, 0x4e, 0xea, 0x8e, 0x63, 0x85, 0x14, 0x02, 0x85, + 0xbe, 0x01, 0x63, 0x3e, 0xf3, 0x4c, 0x7b, 0x4f, 0xf6, 0xc8, 0xb9, 0x7e, 0xaf, 0x39, 0xd5, 0x16, + 0x90, 0x90, 0x4c, 0xa2, 0xd1, 0xcb, 0x30, 0x7e, 0x48, 0x3d, 0x51, 0x21, 0xa3, 0x82, 0x52, 0x74, + 0xe0, 0xbb, 0x21, 0x28, 0x24, 0x8d, 0x08, 0xd4, 0xdf, 0xd7, 0x60, 0x4a, 0x06, 0xd0, 0x22, 0x66, + 0x17, 0xdd, 0x4b, 0x25, 0x54, 0x18, 0x89, 0x57, 0x86, 
0x88, 0x84, 0x3e, 0x1f, 0x35, 0xaf, 0x82, + 0x0c, 0xa4, 0x30, 0x65, 0x38, 0xb6, 0xcf, 0x3c, 0x62, 0xda, 0x32, 0x5d, 0xb3, 0x0d, 0x62, 0x50, + 0xe2, 0x49, 0x36, 0x7d, 0x51, 0x2a, 0x98, 0x4a, 0x60, 0x3e, 0x4e, 0xcb, 0x45, 0x1f, 0xc6, 0x21, + 0xae, 0x0b, 0x0d, 0x6f, 0x56, 0xd2, 0xc0, 0x0f, 0x5f, 0x2d, 0xba, 0x7f, 0x53, 0x60, 0xa9, 0x8c, + 0x29, 0x53, 0x8f, 0xca, 0x73, 0xd5, 0x63, 0xed, 0xe4, 0xea, 0xf1, 0xcf, 0x4a, 0x2a, 0xf6, 0xbe, + 0x8f, 0x3e, 0x82, 0x09, 0xbe, 0xda, 0x74, 0x08, 0x23, 0x72, 0x85, 0x78, 0x6d, 0x50, 0xfb, 0xf6, + 0x35, 0x4e, 0xcd, 0xc7, 0xfd, 0xad, 0x9d, 0x1f, 0x53, 0x83, 0xdd, 0xa0, 0x8c, 0x24, 0xcd, 0x38, + 0x81, 0xe1, 0x58, 0x2a, 0xba, 0x05, 0x23, 0xbe, 0x4b, 0x8d, 0x61, 0x06, 0x91, 0x30, 0xad, 0xed, + 0x52, 0x23, 0xe9, 0xd7, 0xfc, 0x0b, 0x0b, 0x41, 0xea, 0xaf, 0xd2, 0xc1, 0xf0, 0xfd, 0x6c, 0x30, + 0xca, 0x5c, 0xac, 0x9c, 0x9c, 0x8b, 0x3f, 0x8d, 0x5b, 0x81, 0xb0, 0xef, 0xba, 0xe9, 0x33, 0xf4, + 0xc1, 0x31, 0x37, 0x6b, 0xd5, 0xdc, 0xcc, 0xb9, 0x85, 0x93, 0xe3, 0x2a, 0x8b, 0x20, 0x29, 0x17, + 0xdf, 0x84, 0x51, 0x93, 0xd1, 0x6e, 0x54, 0x5f, 0x17, 0x2b, 0xfb, 0x58, 0x9f, 0x91, 0x52, 0x47, + 0xb7, 0x38, 0x3f, 0x0e, 0xc5, 0xa8, 0xbf, 0xab, 0x65, 0x4e, 0xc0, 0x7d, 0x8f, 0x7e, 0x08, 0x93, + 0xbe, 0x9c, 0xc8, 0x51, 0x97, 0xb8, 0x54, 0x45, 0x4f, 0xbc, 0x12, 0x2e, 0x48, 0x55, 0x93, 0x11, + 0xc4, 0xc7, 0x89, 0xc4, 0x54, 0x05, 0xd7, 0x86, 0xaa, 0xe0, 0x5c, 0xfc, 0xcb, 0x2a, 0x18, 0xdd, + 0x83, 0x19, 0x3f, 0x30, 0x19, 0xd9, 0xb1, 0x28, 0x5f, 0x4b, 0xfd, 0xca, 0x9b, 0xec, 0x42, 0xbf, + 0xd7, 0x9c, 0x69, 0xa7, 0x59, 0x71, 0x56, 0x92, 0xea, 0x41, 0x51, 0x6e, 0xa0, 0x1f, 0xc0, 0x98, + 0xe3, 0x92, 0x8f, 0x03, 0x2a, 0x03, 0xfe, 0x8c, 0xe5, 0xf0, 0x96, 0xa0, 0x2d, 0xca, 0x40, 0xe0, + 0xc7, 0x09, 0xd1, 0x58, 0x8a, 0x54, 0x1f, 0x2a, 0x30, 0x9f, 0xef, 0x93, 0x43, 0x34, 0xa2, 0x6d, + 0x98, 0xed, 0x12, 0x66, 0xec, 0xc7, 0xb3, 0x4a, 0x54, 0xe7, 0xa4, 0xbe, 0xd2, 0xef, 0x35, 0x67, + 0x6f, 0x64, 0x30, 0x4f, 0x7b, 0x4d, 0xf4, 0x6e, 0x60, 0x59, 0x47, 0xd9, 0x75, 0x34, 0xc7, 0xaf, + 0xfe, 0xa2, 0x0e, 0x33, 0x99, 0xb1, 0x50, 0x61, 0xf1, 0x5a, 0x83, 0xb9, 0x4e, 0x12, 0x47, 0x8e, + 0x90, 0x66, 0x7c, 0x45, 0x12, 0xa7, 0x93, 0x50, 0xf0, 0xe5, 0xe9, 0xb3, 0x59, 0x59, 0xff, 0xc2, + 0xb3, 0xf2, 0x2e, 0xcc, 0x92, 0x78, 0x11, 0xb8, 0xe1, 0x74, 0xa8, 0x1c, 0xc3, 0x9a, 0xe4, 0x9a, + 0x5d, 0xcb, 0x60, 0x9f, 0xf6, 0x9a, 0xa7, 0xf3, 0xeb, 0x03, 0x87, 0xe3, 0x9c, 0x14, 0xf4, 0x12, + 0x8c, 0x1a, 0x4e, 0x60, 0x33, 0x31, 0xab, 0xeb, 0x49, 0x15, 0xae, 0x73, 0x20, 0x0e, 0x71, 0xe8, + 0x9b, 0x30, 0x45, 0x3a, 0x5d, 0xd3, 0x5e, 0x33, 0x0c, 0xea, 0xfb, 0x4b, 0x63, 0x62, 0x4b, 0x88, + 0x67, 0xe1, 0x5a, 0x82, 0xc2, 0x69, 0x3a, 0xf5, 0x4f, 0x4a, 0xb4, 0x82, 0x96, 0xac, 0x4a, 0xe8, + 0x22, 0x5f, 0xbc, 0x04, 0x4a, 0x06, 0x27, 0xb5, 0x3b, 0x09, 0x30, 0x8e, 0xf0, 0xe8, 0xeb, 0x30, + 0xd6, 0xf1, 0xcc, 0x43, 0xea, 0xc9, 0xc8, 0xc4, 0xe5, 0xb5, 0x21, 0xa0, 0x58, 0x62, 0x79, 0xb0, + 0xdd, 0x68, 0x95, 0x49, 0x05, 0x7b, 0xdb, 0x71, 0x2c, 0x2c, 0x30, 0x42, 0x92, 0xb0, 0x4a, 0xba, + 0x30, 0x91, 0x14, 0xda, 0x2a, 0xb1, 0xea, 0x07, 0x30, 0x9b, 0xdb, 0xff, 0xaf, 0x41, 0xdd, 0xa0, + 0x96, 0xac, 0xa2, 0xd6, 0xe0, 0xe8, 0x1e, 0xbb, 0x3d, 0xe8, 0xe3, 0xfd, 0x5e, 0xb3, 0xbe, 0xbe, + 0x79, 0x1d, 0x73, 0x21, 0xea, 0x6f, 0x14, 0x78, 0xa1, 0xb4, 0xd2, 0x52, 0xa7, 0x55, 0x06, 0x9e, + 0x96, 0x00, 0xb8, 0xc4, 0x23, 0x5d, 0xca, 0xa8, 0xe7, 0x17, 0x0c, 0xb6, 0x6c, 0x3f, 0x97, 0x17, + 0x7b, 0x0d, 0x93, 0x9f, 0x6c, 0xde, 0x67, 0xd4, 0xe6, 0x3b, 0x58, 0x32, 0x33, 0xb7, 0x63, 0x41, + 0x38, 0x25, 0x54, 0xfd, 0x63, 0x0d, 0x4e, 0x6f, 0x3b, 0x9d, 0xb6, 0xb1, 0x4f, 
0x3b, 0x81, 0x65, + 0xda, 0x7b, 0xfc, 0x52, 0x4c, 0xef, 0xb3, 0x13, 0x18, 0xd8, 0xef, 0x67, 0x06, 0xf6, 0x33, 0x1a, + 0x71, 0x91, 0x8d, 0x65, 0x93, 0x1b, 0x7d, 0xc4, 0xb7, 0x59, 0xc2, 0x82, 0xa8, 0xfb, 0x5e, 0x79, + 0x0e, 0xd9, 0x82, 0x3f, 0x89, 0x4c, 0xf8, 0x8d, 0xa5, 0x5c, 0xf5, 0xef, 0x0a, 0x2c, 0x15, 0xb1, + 0x9d, 0xc0, 0x10, 0xfe, 0x5e, 0x76, 0x08, 0xaf, 0x0e, 0x7f, 0xb6, 0x92, 0x69, 0xfc, 0x49, 0xc9, + 0x99, 0xc4, 0x58, 0xbe, 0x02, 0xd3, 0x61, 0xbb, 0xa2, 0x1d, 0x3e, 0x8d, 0x64, 0xe2, 0x9e, 0x96, + 0x82, 0xa6, 0xdb, 0x29, 0x1c, 0xce, 0x50, 0xa2, 0xb7, 0x61, 0xd6, 0x75, 0x18, 0xb5, 0x99, 0x49, + 0xac, 0x70, 0x24, 0x86, 0x97, 0x49, 0xc4, 0xfb, 0xda, 0x76, 0x06, 0x83, 0x73, 0x94, 0xea, 0x2f, + 0x15, 0x58, 0x2e, 0x8f, 0x0e, 0xfa, 0x29, 0xcc, 0x46, 0x27, 0x16, 0xfb, 0x72, 0xc5, 0x0b, 0x1e, + 0x4e, 0xf3, 0x24, 0xb2, 0x65, 0xc8, 0xcf, 0x46, 0x3d, 0x37, 0x43, 0xe6, 0xe3, 0x9c, 0x2a, 0xf5, + 0xd7, 0x35, 0x98, 0xc9, 0x90, 0x9c, 0x40, 0xc9, 0xdc, 0xce, 0x94, 0x4c, 0x6b, 0x98, 0x63, 0x96, + 0xd5, 0xca, 0xbd, 0x5c, 0xad, 0x5c, 0x1e, 0x46, 0xe8, 0xe0, 0x22, 0xe9, 0x2b, 0xd0, 0xc8, 0xd0, + 0xf3, 0x1d, 0x22, 0xe8, 0x52, 0x0f, 0xd3, 0x5d, 0xea, 0x51, 0xdb, 0xa0, 0xe8, 0x12, 0x4c, 0x10, + 0xd7, 0xbc, 0xea, 0x39, 0x81, 0x2b, 0x53, 0x2a, 0x4e, 0xfd, 0xb5, 0xed, 0x2d, 0x01, 0xc7, 0x31, + 0x05, 0xa7, 0x8e, 0x2c, 0x92, 0x13, 0x20, 0x75, 0x27, 0x0c, 0xe1, 0x38, 0xa6, 0x88, 0x17, 0x83, + 0x91, 0xd2, 0xc5, 0x40, 0x87, 0x7a, 0x60, 0x76, 0xe4, 0x45, 0xf6, 0x35, 0x49, 0x50, 0xbf, 0xb3, + 0xb5, 0xf1, 0xb4, 0xd7, 0xbc, 0x50, 0xf6, 0x7e, 0xca, 0x8e, 0x5c, 0xea, 0x6b, 0x77, 0xb6, 0x36, + 0x30, 0x67, 0x56, 0xff, 0xa2, 0xc0, 0x42, 0xe6, 0x90, 0x27, 0xd0, 0x02, 0xb6, 0xb3, 0x2d, 0xe0, + 0x95, 0x21, 0x42, 0x56, 0x52, 0xfb, 0x0f, 0x14, 0x38, 0x37, 0xb0, 0x2c, 0x2a, 0xac, 0x59, 0xdf, + 0x81, 0xb9, 0xc0, 0xce, 0x2e, 0xbf, 0x61, 0xa5, 0x2f, 0xf2, 0x15, 0xeb, 0x4e, 0x16, 0x85, 0xf3, + 0xb4, 0xfc, 0xba, 0xb5, 0x70, 0x2c, 0x65, 0xd1, 0x7b, 0xf9, 0x97, 0xe7, 0x8b, 0x95, 0xaf, 0xdc, + 0x03, 0x9e, 0x9b, 0xb3, 0xcf, 0xc2, 0xb5, 0x4a, 0xcf, 0xc2, 0x9f, 0xd6, 0x60, 0xb1, 0x20, 0xfb, + 0xd1, 0x87, 0x00, 0xc9, 0xd6, 0x55, 0x10, 0xec, 0x02, 0x23, 0x8f, 0x3d, 0x2a, 0xcd, 0x8a, 0xf7, + 0xe0, 0x04, 0x9a, 0x92, 0x88, 0x7c, 0x98, 0xf2, 0xa8, 0x4f, 0xbd, 0x43, 0xda, 0x79, 0xd7, 0xf1, + 0x64, 0xc8, 0xbf, 0x3d, 0x44, 0xc8, 0x8f, 0x55, 0x5d, 0xb2, 0xdc, 0xe1, 0x44, 0x30, 0x4e, 0x6b, + 0x41, 0x6d, 0x38, 0xd3, 0xa1, 0x24, 0x65, 0xa6, 0x58, 0xd3, 0x68, 0x47, 0xbe, 0x21, 0x9d, 0x93, + 0x02, 0xce, 0x6c, 0x14, 0x11, 0xe1, 0x62, 0x5e, 0xf5, 0x9f, 0x0a, 0x9c, 0xc9, 0x58, 0xf6, 0x1e, + 0xed, 0xba, 0x16, 0x61, 0xf4, 0x04, 0x3a, 0xe7, 0xbd, 0x4c, 0xe7, 0x7c, 0x6b, 0x08, 0xf7, 0x45, + 0x46, 0x96, 0xbe, 0x13, 0xfc, 0x43, 0x81, 0x17, 0x0a, 0x39, 0x4e, 0xa0, 0x13, 0xbc, 0x9f, 0xed, + 0x04, 0xaf, 0x3f, 0xc7, 0xb9, 0x4a, 0x3a, 0xc2, 0xe3, 0xb2, 0x53, 0xb5, 0xc3, 0x0d, 0xeb, 0xff, + 0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x3a, 0xa2, 0xe4, 0x37, 0x86, 0x0a, 0x3d, 0x6d, 0x15, 0x40, + 0xfe, 0x40, 0x16, 0xbd, 0x9f, 0xd5, 0x13, 0xbb, 0xaf, 0xc6, 0x18, 0x9c, 0xa2, 0x42, 0xd7, 0x00, + 0x45, 0x16, 0xb6, 0x2d, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x5d, 0xf0, 0x2e, 0x4b, 0x5e, 0x84, 0x8f, + 0x51, 0xe0, 0x02, 0x2e, 0xf5, 0xaf, 0x4a, 0xb2, 0x64, 0x08, 0xf0, 0x97, 0xd5, 0xf3, 0xc2, 0xb8, + 0x52, 0xcf, 0xa7, 0x87, 0xa4, 0xa0, 0xfc, 0xd2, 0x0e, 0x49, 0x61, 0x5d, 0x49, 0x49, 0x3c, 0xac, + 0xe7, 0x4e, 0x21, 0x4a, 0xa1, 0xea, 0x65, 0xee, 0xba, 0xbc, 0xba, 0x86, 0x6e, 0x7d, 0xb9, 0x9a, + 0x39, 0x3c, 0x4d, 0x0b, 0xaf, 0xb9, 0x97, 0x60, 0xc2, 0x76, 0x3a, 0x54, 0x3c, 0x66, 0xe4, 0x56, + 0xa1, 
0x9b, 0x12, 0x8e, 0x63, 0x8a, 0x63, 0x3f, 0xaf, 0x8e, 0x7c, 0x41, 0x3f, 0xaf, 0xf2, 0xf5, + 0xcd, 0x92, 0x5b, 0xfd, 0xa8, 0x98, 0x0c, 0xc9, 0xfa, 0x26, 0xe1, 0x38, 0xa6, 0x40, 0xb7, 0x92, + 0x59, 0x3e, 0x26, 0x62, 0xf2, 0xb5, 0x2a, 0xb3, 0xbc, 0x7c, 0x8c, 0xeb, 0xfa, 0xa3, 0x27, 0x8d, + 0x53, 0x8f, 0x9f, 0x34, 0x4e, 0x7d, 0xf6, 0xa4, 0x71, 0xea, 0x41, 0xbf, 0xa1, 0x3c, 0xea, 0x37, + 0x94, 0xc7, 0xfd, 0x86, 0xf2, 0x59, 0xbf, 0xa1, 0x7c, 0xde, 0x6f, 0x28, 0x9f, 0xfc, 0xbb, 0x71, + 0xea, 0xfb, 0x2f, 0x0e, 0xfa, 0x2f, 0x82, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x23, 0x3d, 0xa6, + 0x20, 0x64, 0x20, 0x00, 0x00, } -func (m *ResourceFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + +func (m *AllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } - return b[:n], nil -} -func (m *ResourceFilter) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceFilter.Merge(m, src) -} -func (m *ResourceFilter) XXX_Size() int { - return m.Size() -} -func (m *ResourceFilter) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceFilter.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_ResourceFilter proto.InternalMessageInfo - -func (m *ResourceFilterModel) Reset() { *m = ResourceFilterModel{} } -func (*ResourceFilterModel) ProtoMessage() {} -func (*ResourceFilterModel) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{35} -} -func (m *ResourceFilterModel) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) +func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceFilterModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Controller) + copy(dAtA[i:], m.Controller) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) + i-- + dAtA[i] = 0x22 + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - return b[:n], nil -} -func (m *ResourceFilterModel) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceFilterModel.Merge(m, src) -} -func (m *ResourceFilterModel) XXX_Size() int { - return m.Size() -} -func (m *ResourceFilterModel) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceFilterModel.DiscardUnknown(m) + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -var xxx_messageInfo_ResourceFilterModel proto.InternalMessageInfo - -func (m *ResourceHandle) Reset() { *m = ResourceHandle{} } -func (*ResourceHandle) ProtoMessage() {} -func (*ResourceHandle) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{36} -} -func (m *ResourceHandle) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *BasicDevice) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } - return b[:n], nil -} -func (m *ResourceHandle) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceHandle.Merge(m, src) -} -func (m *ResourceHandle) XXX_Size() int { - return m.Size() + return dAtA[:n], nil } -func (m *ResourceHandle) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceHandle.DiscardUnknown(m) + +func (m *BasicDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -var xxx_messageInfo_ResourceHandle proto.InternalMessageInfo - -func (m *ResourceModel) Reset() { *m = ResourceModel{} } -func (*ResourceModel) ProtoMessage() {} -func (*ResourceModel) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{37} -} -func (m *ResourceModel) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceModel) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceModel.Merge(m, src) -} -func (m *ResourceModel) XXX_Size() int { - return m.Size() -} -func (m *ResourceModel) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceModel.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceModel proto.InternalMessageInfo - -func (m *ResourceRequest) Reset() { *m = ResourceRequest{} } -func (*ResourceRequest) ProtoMessage() {} -func (*ResourceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{38} -} -func (m *ResourceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceRequest.Merge(m, src) -} -func (m *ResourceRequest) XXX_Size() int { - return m.Size() -} -func (m *ResourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceRequest proto.InternalMessageInfo - -func (m *ResourceRequestModel) Reset() { *m = ResourceRequestModel{} } -func (*ResourceRequestModel) ProtoMessage() {} -func (*ResourceRequestModel) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{39} -} -func (m *ResourceRequestModel) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceRequestModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceRequestModel) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceRequestModel.Merge(m, src) -} -func (m *ResourceRequestModel) XXX_Size() int { - return m.Size() -} -func (m *ResourceRequestModel) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceRequestModel.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceRequestModel proto.InternalMessageInfo - -func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } -func (*ResourceSlice) ProtoMessage() {} -func (*ResourceSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{40} -} -func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceSlice) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceSlice.Merge(m, src) -} -func (m *ResourceSlice) XXX_Size() int { - return m.Size() -} -func (m *ResourceSlice) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceSlice.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo - -func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } -func (*ResourceSliceList) ProtoMessage() {} -func (*ResourceSliceList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{41} -} -func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceSliceList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceSliceList.Merge(m, src) -} -func (m *ResourceSliceList) XXX_Size() int { - return m.Size() -} -func (m *ResourceSliceList) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceSliceList.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo - -func (m *StructuredResourceHandle) Reset() { *m = StructuredResourceHandle{} } -func (*StructuredResourceHandle) ProtoMessage() {} -func (*StructuredResourceHandle) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{42} -} -func (m *StructuredResourceHandle) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StructuredResourceHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } - return b[:n], nil -} -func (m *StructuredResourceHandle) XXX_Merge(src proto.Message) { - xxx_messageInfo_StructuredResourceHandle.Merge(m, src) -} -func (m *StructuredResourceHandle) XXX_Size() int { - return m.Size() -} -func (m *StructuredResourceHandle) XXX_DiscardUnknown() { - xxx_messageInfo_StructuredResourceHandle.DiscardUnknown(m) -} - -var xxx_messageInfo_StructuredResourceHandle proto.InternalMessageInfo - -func (m *VendorParameters) Reset() { *m = VendorParameters{} } -func (*VendorParameters) ProtoMessage() {} -func (*VendorParameters) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{43} -} -func (m *VendorParameters) XXX_Unmarshal(b []byte) 
error { - return m.Unmarshal(b) -} -func (m *VendorParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + if len(m.Attributes) > 0 { + keysForAttributes := make([]string, 0, len(m.Attributes)) + for k := range m.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAttributes[iNdEx]) + copy(dAtA[i:], keysForAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } - return b[:n], nil -} -func (m *VendorParameters) XXX_Merge(src proto.Message) { - xxx_messageInfo_VendorParameters.Merge(m, src) -} -func (m *VendorParameters) XXX_Size() int { - return m.Size() -} -func (m *VendorParameters) XXX_DiscardUnknown() { - xxx_messageInfo_VendorParameters.DiscardUnknown(m) -} - -var xxx_messageInfo_VendorParameters proto.InternalMessageInfo - -func init() { - proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha2.AllocationResult") - proto.RegisterType((*AllocationResultModel)(nil), "k8s.io.api.resource.v1alpha2.AllocationResultModel") - proto.RegisterType((*DriverAllocationResult)(nil), "k8s.io.api.resource.v1alpha2.DriverAllocationResult") - proto.RegisterType((*DriverRequests)(nil), "k8s.io.api.resource.v1alpha2.DriverRequests") - proto.RegisterType((*NamedResourcesAllocationResult)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesAllocationResult") - proto.RegisterType((*NamedResourcesAttribute)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesAttribute") - proto.RegisterType((*NamedResourcesAttributeValue)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesAttributeValue") - proto.RegisterType((*NamedResourcesFilter)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesFilter") - proto.RegisterType((*NamedResourcesInstance)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesInstance") - proto.RegisterType((*NamedResourcesIntSlice)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesIntSlice") - proto.RegisterType((*NamedResourcesRequest)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesRequest") - proto.RegisterType((*NamedResourcesResources)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesResources") - proto.RegisterType((*NamedResourcesStringSlice)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesStringSlice") - proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContext") - proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextList") - proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextSpec") - proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextStatus") - proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaim") - proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimConsumerReference") - proto.RegisterType((*ResourceClaimList)(nil), 
"k8s.io.api.resource.v1alpha2.ResourceClaimList") - proto.RegisterType((*ResourceClaimParameters)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParameters") - proto.RegisterType((*ResourceClaimParametersList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersList") - proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersReference") - proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSchedulingStatus") - proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSpec") - proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimStatus") - proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplate") - proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateList") - proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateSpec") - proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha2.ResourceClass") - proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassList") - proto.RegisterType((*ResourceClassParameters)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParameters") - proto.RegisterType((*ResourceClassParametersList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersList") - proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersReference") - proto.RegisterType((*ResourceFilter)(nil), "k8s.io.api.resource.v1alpha2.ResourceFilter") - proto.RegisterType((*ResourceFilterModel)(nil), "k8s.io.api.resource.v1alpha2.ResourceFilterModel") - proto.RegisterType((*ResourceHandle)(nil), "k8s.io.api.resource.v1alpha2.ResourceHandle") - proto.RegisterType((*ResourceModel)(nil), "k8s.io.api.resource.v1alpha2.ResourceModel") - proto.RegisterType((*ResourceRequest)(nil), "k8s.io.api.resource.v1alpha2.ResourceRequest") - proto.RegisterType((*ResourceRequestModel)(nil), "k8s.io.api.resource.v1alpha2.ResourceRequestModel") - proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1alpha2.ResourceSlice") - proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1alpha2.ResourceSliceList") - proto.RegisterType((*StructuredResourceHandle)(nil), "k8s.io.api.resource.v1alpha2.StructuredResourceHandle") - proto.RegisterType((*VendorParameters)(nil), "k8s.io.api.resource.v1alpha2.VendorParameters") -} - -func init() { - proto.RegisterFile("k8s.io/api/resource/v1alpha2/generated.proto", fileDescriptor_4312f5b44a31ec02) -} - -var fileDescriptor_4312f5b44a31ec02 = []byte{ - // 2242 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0x4d, 0x6c, 0x1c, 0x57, - 0xd9, 0xb3, 0xbb, 0x89, 0xd7, 0x9f, 0xed, 0xb5, 0x33, 0xb6, 0xe3, 0x4d, 0xea, 0xee, 0x6e, 0x47, - 0x20, 0x2c, 0x70, 0x76, 0x1b, 0xa7, 0x4d, 0xa3, 0x52, 0x90, 0x32, 0x71, 0x13, 0x2c, 0x9a, 0xd4, - 0x7d, 0x4b, 0xdc, 0xa6, 0xfc, 0x75, 0xbc, 0xf3, 0x62, 0x0f, 0xd9, 0x9d, 0xd9, 0xcc, 0x7b, 0xeb, - 0x26, 0xe2, 0x12, 0x55, 0x20, 0xb8, 0x20, 0x15, 0x81, 0x10, 0x9c, 0x38, 0x21, 0xc4, 0x85, 0x0b, - 0x5c, 0x39, 0x55, 0xd0, 0x1c, 0x83, 0x40, 0xa2, 0xe2, 0xb0, 0x22, 0xcb, 0x91, 0x23, 0xb7, 0x9e, - 0xd0, 0xbc, 0xf7, 0xe6, 0xe7, 0xcd, 0xce, 0xac, 0x77, 0x96, 0xc6, 0x4a, 0x4e, 0xde, 0x79, 0xef, - 0xfb, 0x7b, 0xdf, 0xff, 0x7b, 0x9f, 0x61, 0xe3, 0xce, 0x25, 0x52, 
0xb7, 0x9c, 0x86, 0xd1, 0xb5, - 0x1a, 0x2e, 0x26, 0x4e, 0xcf, 0x6d, 0xe1, 0xc6, 0xe1, 0x79, 0xa3, 0xdd, 0x3d, 0x30, 0x36, 0x1b, - 0xfb, 0xd8, 0xc6, 0xae, 0x41, 0xb1, 0x59, 0xef, 0xba, 0x0e, 0x75, 0xd4, 0x35, 0x0e, 0x5d, 0x37, - 0xba, 0x56, 0xdd, 0x87, 0xae, 0xfb, 0xd0, 0x67, 0xcf, 0xed, 0x5b, 0xf4, 0xa0, 0xb7, 0x57, 0x6f, - 0x39, 0x9d, 0xc6, 0xbe, 0xb3, 0xef, 0x34, 0x18, 0xd2, 0x5e, 0xef, 0x36, 0xfb, 0x62, 0x1f, 0xec, - 0x17, 0x27, 0x76, 0x56, 0x8b, 0xb0, 0x6e, 0x39, 0xae, 0xc7, 0x36, 0xce, 0xf0, 0xec, 0x4b, 0x21, - 0x4c, 0xc7, 0x68, 0x1d, 0x58, 0x36, 0x76, 0xef, 0x37, 0xba, 0x77, 0xf6, 0x65, 0x79, 0xb3, 0x60, - 0x91, 0x46, 0x07, 0x53, 0x23, 0x89, 0x57, 0x23, 0x0d, 0xcb, 0xed, 0xd9, 0xd4, 0xea, 0x0c, 0xb3, - 0xb9, 0x78, 0x14, 0x02, 0x69, 0x1d, 0xe0, 0x8e, 0x11, 0xc7, 0xd3, 0x7e, 0x99, 0x83, 0xc5, 0xcb, - 0xed, 0xb6, 0xd3, 0x32, 0xa8, 0xe5, 0xd8, 0x08, 0x93, 0x5e, 0x9b, 0xaa, 0x0e, 0x2c, 0xf8, 0xe7, - 0xf9, 0x9a, 0x61, 0x9b, 0x6d, 0x4c, 0xca, 0x4a, 0x2d, 0xbf, 0x3e, 0xbb, 0xb9, 0x51, 0x1f, 0xa5, - 0xf4, 0x3a, 0x92, 0x90, 0xf4, 0xd5, 0x87, 0xfd, 0xea, 0xd4, 0xa0, 0x5f, 0x5d, 0x90, 0xd7, 0x09, - 0x8a, 0x53, 0x57, 0xf7, 0x60, 0xd1, 0x38, 0x34, 0xac, 0xb6, 0xb1, 0xd7, 0xc6, 0x6f, 0xda, 0x37, - 0x1c, 0x13, 0x93, 0x72, 0xae, 0xa6, 0xac, 0xcf, 0x6e, 0xd6, 0xa2, 0x1c, 0x3d, 0xcb, 0xd4, 0x0f, - 0xcf, 0xd7, 0x3d, 0x80, 0x26, 0x6e, 0xe3, 0x16, 0x75, 0x5c, 0x7d, 0x79, 0xd0, 0xaf, 0x2e, 0x5e, - 0x8e, 0x61, 0xa3, 0x21, 0x7a, 0x6a, 0x03, 0x66, 0xc8, 0x81, 0xe1, 0x62, 0x6f, 0xad, 0x9c, 0xaf, - 0x29, 0xeb, 0x45, 0xfd, 0x94, 0x10, 0x70, 0xa6, 0xe9, 0x6f, 0xa0, 0x10, 0x46, 0xfb, 0xa9, 0x02, - 0x2b, 0x71, 0xd5, 0x5c, 0x77, 0x4c, 0xdc, 0x56, 0xef, 0x41, 0xc9, 0x36, 0x3a, 0xd8, 0xf4, 0xcf, - 0xe5, 0xa9, 0xc7, 0x13, 0xf6, 0xb5, 0xd1, 0xea, 0xb9, 0x21, 0xe1, 0xc4, 0x49, 0xeb, 0xea, 0xa0, - 0x5f, 0x2d, 0xc9, 0x30, 0x28, 0xc6, 0x47, 0xfb, 0x7d, 0x0e, 0x4e, 0x6f, 0xb9, 0xd6, 0x21, 0x76, - 0x87, 0x8c, 0xf6, 0x63, 0x05, 0x56, 0x0f, 0xb1, 0x6d, 0x3a, 0x2e, 0xc2, 0x77, 0x7b, 0x98, 0xd0, - 0x1d, 0xc3, 0x35, 0x3a, 0x98, 0x62, 0xd7, 0x17, 0xef, 0x5c, 0x44, 0xbc, 0xc0, 0x49, 0xea, 0xdd, - 0x3b, 0xfb, 0x75, 0xe1, 0x24, 0x75, 0x64, 0xbc, 0xff, 0xfa, 0x3d, 0x8a, 0x6d, 0x62, 0x39, 0xb6, - 0x5e, 0x15, 0xda, 0x59, 0xdd, 0x4d, 0xa6, 0x8a, 0xd2, 0xd8, 0x79, 0xa2, 0xac, 0x18, 0x49, 0x9a, - 0x13, 0x46, 0xbd, 0x30, 0x5a, 0x4f, 0x89, 0x4a, 0xd7, 0x9f, 0x17, 0xe2, 0x24, 0xdb, 0x04, 0x25, - 0x33, 0xd4, 0x7e, 0x91, 0x83, 0x12, 0x57, 0x98, 0x10, 0x93, 0xa8, 0x9b, 0x00, 0x26, 0x5b, 0xf1, - 0x74, 0xcd, 0x54, 0x33, 0xa3, 0xab, 0x82, 0x38, 0x6c, 0x05, 0x3b, 0x28, 0x02, 0xa5, 0x12, 0x58, - 0xe4, 0x87, 0x8d, 0x28, 0x35, 0x37, 0x89, 0x52, 0xcb, 0x82, 0xd1, 0xe2, 0x6e, 0x8c, 0x1c, 0x1a, - 0x62, 0xa0, 0x7e, 0x13, 0x8a, 0xae, 0x10, 0xba, 0x9c, 0x67, 0xf1, 0x77, 0x6e, 0xbc, 0xf8, 0x13, - 0x47, 0xd5, 0x17, 0x05, 0xb3, 0xa2, 0x7f, 0x76, 0x14, 0x10, 0xd4, 0x74, 0xa8, 0x8c, 0xf6, 0x47, - 0xb5, 0x06, 0x05, 0x3b, 0xd4, 0xd0, 0x9c, 0xa0, 0x55, 0x60, 0xba, 0x61, 0x3b, 0xda, 0x5f, 0x14, - 0x58, 0x8d, 0x11, 0xa1, 0xd4, 0xb5, 0xf6, 0x7a, 0x14, 0x1f, 0x8d, 0xed, 0x79, 0x49, 0xc9, 0xf0, - 0xe1, 0x77, 0x8d, 0x76, 0x0f, 0x0b, 0x95, 0xbe, 0x9a, 0x29, 0x8c, 0x24, 0x0a, 0xfa, 0xe7, 0x04, - 0xa3, 0xb5, 0x51, 0x50, 0x28, 0xc6, 0x57, 0xfb, 0x4f, 0x1e, 0x46, 0x22, 0xa8, 0xdf, 0x86, 0xe2, - 0xdd, 0x9e, 0x61, 0x53, 0x8b, 0xde, 0x2f, 0x9f, 0x64, 0x42, 0xd6, 0x53, 0xed, 0x2e, 0x49, 0xfd, - 0x96, 0xc0, 0xd2, 0x4f, 0x0d, 0xfa, 0xd5, 0x79, 0xff, 0x8b, 0x4b, 0x11, 0x90, 0x54, 0x5f, 0x80, - 0xc2, 0x9e, 0xe3, 0xf0, 0xf0, 0x28, 0xea, 0xf3, 0x5e, 0x4a, 0xd2, 0x1d, 0xa7, 0xcd, 0xc1, 
0xd8, - 0x96, 0x5a, 0x81, 0xbc, 0x65, 0xd3, 0xf2, 0x74, 0x4d, 0x59, 0xcf, 0xeb, 0x73, 0x9e, 0x51, 0xb7, - 0x6d, 0xca, 0x01, 0xbc, 0x0d, 0xb5, 0x05, 0x45, 0xcb, 0xa6, 0xcd, 0xb6, 0xd5, 0xc2, 0xe5, 0x22, - 0x93, 0xf0, 0xa5, 0x2c, 0x6a, 0xdc, 0x16, 0xb8, 0x5c, 0x4e, 0xff, 0x4b, 0xc8, 0xe9, 0x13, 0x56, - 0xbf, 0x00, 0x27, 0x09, 0x75, 0x2d, 0x7b, 0xbf, 0x7c, 0x82, 0x99, 0x75, 0x61, 0xd0, 0xaf, 0xce, - 0x36, 0xd9, 0x0a, 0x07, 0x15, 0xdb, 0xaa, 0x03, 0xb3, 0xfc, 0x17, 0x17, 0x68, 0x86, 0x09, 0xf4, - 0x4a, 0x16, 0x81, 0x9a, 0x21, 0x3a, 0x4f, 0xf1, 0x91, 0x05, 0xce, 0x2b, 0xca, 0x41, 0xfd, 0x22, - 0x4c, 0x1f, 0x62, 0xd7, 0x0b, 0xb1, 0x32, 0x30, 0xd1, 0x16, 0x07, 0xfd, 0xea, 0xdc, 0x2e, 0x5f, - 0xe2, 0xf0, 0x3e, 0x80, 0xb6, 0x05, 0xcb, 0x32, 0xaf, 0xab, 0x56, 0x9b, 0x62, 0x57, 0xdd, 0x80, - 0x22, 0x11, 0x55, 0x45, 0xb8, 0x6d, 0x10, 0x40, 0x7e, 0xb5, 0x41, 0x01, 0x84, 0xf6, 0x1b, 0x05, - 0x4e, 0xc7, 0x75, 0x48, 0xa8, 0x61, 0xb7, 0xc6, 0xf1, 0x7d, 0x0b, 0x20, 0x70, 0x41, 0x2f, 0x93, - 0x78, 0xc1, 0xfd, 0xf2, 0x44, 0x6e, 0x1f, 0xa6, 0xae, 0x60, 0x89, 0xa0, 0x08, 0x71, 0xed, 0xe2, - 0xb0, 0x98, 0xc2, 0x9a, 0x6b, 0x50, 0xb0, 0x6c, 0xca, 0x6b, 0x7b, 0x5e, 0x2f, 0x7a, 0x22, 0x6e, - 0xdb, 0x94, 0x20, 0xb6, 0xaa, 0xbd, 0x0e, 0x2b, 0xb1, 0x62, 0xc4, 0x53, 0x47, 0x46, 0x35, 0x3d, - 0x18, 0xca, 0x11, 0xc1, 0x0f, 0x15, 0xc3, 0x8c, 0x25, 0x74, 0xe6, 0x77, 0x18, 0x19, 0x9d, 0x96, - 0x23, 0x87, 0x85, 0xdc, 0x5f, 0x21, 0x28, 0xa4, 0xac, 0xe9, 0x70, 0x26, 0xd5, 0xb7, 0xd4, 0xcf, - 0xc3, 0x34, 0xf7, 0x23, 0x2e, 0xc1, 0x8c, 0x3e, 0x3b, 0xe8, 0x57, 0xa7, 0x39, 0x04, 0x41, 0xfe, - 0x9e, 0xf6, 0xc7, 0x1c, 0x2c, 0xef, 0x38, 0x66, 0xb3, 0x75, 0x80, 0xcd, 0x5e, 0xdb, 0xb2, 0xf7, - 0xaf, 0x38, 0x36, 0xc5, 0xf7, 0xa8, 0xfa, 0x1e, 0x14, 0xbd, 0x26, 0xce, 0x34, 0xa8, 0x21, 0xca, - 0xec, 0x8b, 0xa3, 0x32, 0x03, 0xa9, 0x7b, 0xd0, 0x5e, 0x13, 0xf3, 0xe6, 0xde, 0xf7, 0x70, 0x8b, - 0x5e, 0xc7, 0xd4, 0x08, 0x4d, 0x18, 0xae, 0xa1, 0x80, 0xaa, 0xfa, 0x0e, 0x14, 0x48, 0x17, 0xb7, - 0x44, 0x72, 0xbc, 0x38, 0x5a, 0x41, 0x49, 0x32, 0x36, 0xbb, 0xb8, 0x15, 0x7a, 0xa1, 0xf7, 0x85, - 0x18, 0x45, 0xf5, 0x3d, 0x2f, 0x9c, 0x0d, 0xda, 0x23, 0xac, 0x1f, 0x9a, 0xdd, 0xbc, 0x34, 0x01, - 0x6d, 0x86, 0xaf, 0x97, 0x04, 0xf5, 0x93, 0xfc, 0x1b, 0x09, 0xba, 0xda, 0x5f, 0x15, 0x28, 0x27, - 0xa1, 0xbd, 0x61, 0x11, 0xaa, 0x7e, 0x6b, 0x48, 0x75, 0xf5, 0xf1, 0x54, 0xe7, 0x61, 0x33, 0xc5, - 0x05, 0x8e, 0xe7, 0xaf, 0x44, 0xd4, 0xf6, 0x36, 0x9c, 0xb0, 0x28, 0xee, 0xf8, 0xd1, 0xb5, 0x99, - 0xfd, 0x6c, 0xfa, 0xbc, 0x20, 0x7f, 0x62, 0xdb, 0x23, 0x84, 0x38, 0x3d, 0xed, 0xc3, 0x94, 0x33, - 0x79, 0x8a, 0x55, 0x2f, 0xc1, 0x1c, 0x77, 0x7d, 0x6c, 0x7a, 0x6d, 0xa7, 0x08, 0x90, 0x65, 0x41, - 0x68, 0xae, 0x19, 0xd9, 0x43, 0x12, 0xa4, 0xfa, 0x2a, 0x94, 0xba, 0x0e, 0xc5, 0x36, 0xb5, 0x8c, - 0xb6, 0xdf, 0x01, 0x7b, 0xfe, 0xc8, 0xda, 0xc2, 0x1d, 0x69, 0x07, 0xc5, 0x20, 0xb5, 0x5f, 0x29, - 0x70, 0x36, 0xdd, 0x3a, 0xea, 0xf7, 0xa1, 0xe4, 0x9f, 0xf8, 0x4a, 0xdb, 0xb0, 0x3a, 0x7e, 0xb0, - 0x7d, 0x79, 0xbc, 0x76, 0x82, 0xe1, 0x84, 0xb4, 0x85, 0xc9, 0x4f, 0x8b, 0x33, 0x95, 0x24, 0x30, - 0x82, 0x62, 0xac, 0xb4, 0x5f, 0xe7, 0x60, 0x5e, 0x02, 0x39, 0x86, 0x90, 0x79, 0x4b, 0x0a, 0x99, - 0x46, 0x96, 0x63, 0xa6, 0xc5, 0xca, 0xad, 0x58, 0xac, 0x9c, 0xcf, 0x42, 0x74, 0x74, 0x90, 0x0c, - 0x14, 0xa8, 0x48, 0xf0, 0x57, 0x1c, 0x9b, 0xf4, 0x3a, 0x5e, 0xcb, 0x7a, 0x1b, 0xbb, 0xd8, 0xab, - 0x28, 0x1b, 0x50, 0x34, 0xba, 0xd6, 0x35, 0xd7, 0xe9, 0x75, 0xe3, 0x39, 0xf7, 0xf2, 0xce, 0x36, - 0x5b, 0x47, 0x01, 0x84, 0x07, 0xed, 0x4b, 0xc4, 0xa4, 0x9d, 0x89, 0x76, 0x82, 0xa2, 0x45, 0x0c, - 0x20, 0x82, 0x6a, 
0x55, 0x48, 0xad, 0x56, 0x3a, 0xe4, 0x7b, 0x96, 0x29, 0x6a, 0xfe, 0x8b, 0x02, - 0x20, 0x7f, 0x73, 0x7b, 0xeb, 0xd3, 0x7e, 0xf5, 0x85, 0xb4, 0x8b, 0x27, 0xbd, 0xdf, 0xc5, 0xa4, - 0x7e, 0x73, 0x7b, 0x0b, 0x79, 0xc8, 0xda, 0x47, 0x0a, 0x9c, 0x92, 0x0e, 0x79, 0x0c, 0x29, 0x60, - 0x47, 0x4e, 0x01, 0x5f, 0xca, 0x60, 0xb2, 0x94, 0xd8, 0xff, 0x59, 0x1e, 0x56, 0x25, 0xb8, 0x48, - 0xbb, 0xfe, 0xe4, 0xdd, 0xfa, 0x7d, 0x98, 0x0f, 0xee, 0xef, 0x57, 0x5d, 0xa7, 0x23, 0xfc, 0xfb, - 0xab, 0x19, 0xce, 0x15, 0xb9, 0x70, 0xf8, 0xce, 0xc5, 0x5b, 0xbe, 0x6b, 0x51, 0xc2, 0x48, 0xe6, - 0x93, 0xf9, 0xee, 0xac, 0xb6, 0xa1, 0x64, 0x4a, 0xb7, 0xae, 0x72, 0x61, 0x9c, 0x07, 0x04, 0xf9, - 0xa6, 0x16, 0xa6, 0x18, 0x79, 0x1d, 0xc5, 0x68, 0x6b, 0xff, 0x50, 0xe0, 0xb9, 0x94, 0x53, 0x1e, - 0x83, 0x97, 0xbd, 0x2b, 0x7b, 0xd9, 0xcb, 0x13, 0x59, 0x23, 0xc5, 0xdf, 0x7e, 0xae, 0x40, 0xed, - 0x28, 0xfb, 0x65, 0x4c, 0x0e, 0x35, 0x28, 0xdc, 0xb1, 0x6c, 0x93, 0xf9, 0x4e, 0x24, 0xdc, 0xbf, - 0x6e, 0xd9, 0x26, 0x62, 0x3b, 0x41, 0x42, 0xc8, 0xa7, 0x5e, 0xfc, 0x1e, 0x28, 0xf0, 0xfc, 0xc8, - 0xea, 0x30, 0x46, 0x0b, 0xfc, 0x15, 0x58, 0xe8, 0xd9, 0xa4, 0x67, 0x51, 0xcf, 0x61, 0xa2, 0x05, - 0x6f, 0x69, 0xd0, 0xaf, 0x2e, 0xdc, 0x94, 0xb7, 0x50, 0x1c, 0x56, 0xfb, 0x6d, 0x2e, 0x96, 0x4f, - 0x58, 0xf9, 0xbd, 0x06, 0xa7, 0x22, 0xe5, 0x87, 0x90, 0xc8, 0x15, 0xff, 0x8c, 0x90, 0x21, 0x8a, - 0xc5, 0x01, 0xd0, 0x30, 0x8e, 0x17, 0x6a, 0xdd, 0xa8, 0xaa, 0x3f, 0xcb, 0x50, 0x93, 0x36, 0x90, - 0xcc, 0x47, 0xdd, 0x81, 0x52, 0xf8, 0x92, 0x71, 0xdd, 0x6b, 0x21, 0xb8, 0x19, 0xd6, 0xfd, 0x58, - 0xb8, 0x2c, 0xed, 0x7e, 0x3a, 0xb4, 0x82, 0x62, 0xf8, 0xda, 0x7f, 0x73, 0xb0, 0x94, 0x50, 0x8e, - 0x26, 0x7a, 0x07, 0xf9, 0x0e, 0x40, 0x48, 0x5d, 0xe8, 0xa4, 0x9e, 0xed, 0x35, 0x47, 0x2f, 0xb1, - 0xcb, 0x4a, 0xb8, 0x1a, 0xa1, 0xa8, 0x12, 0x98, 0x75, 0x31, 0xc1, 0xee, 0x21, 0x36, 0xaf, 0x3a, - 0xae, 0x78, 0xf5, 0x78, 0x2d, 0x83, 0xd2, 0x87, 0x4a, 0xa7, 0xbe, 0x24, 0x8e, 0x34, 0x8b, 0x42, - 0xc2, 0x28, 0xca, 0x45, 0x6d, 0xc2, 0x8a, 0x89, 0xa3, 0xcf, 0x47, 0x2c, 0xad, 0x60, 0x93, 0x55, - 0xc4, 0x62, 0xf8, 0xf0, 0xb4, 0x95, 0x04, 0x84, 0x92, 0x71, 0xb5, 0xbf, 0x2b, 0xb0, 0x22, 0x49, - 0xf6, 0x0d, 0xdc, 0xe9, 0xb6, 0x0d, 0x8a, 0x8f, 0xa1, 0x4e, 0xdc, 0x92, 0xda, 0x9f, 0x57, 0x32, - 0xa8, 0xcf, 0x17, 0x32, 0xad, 0x0d, 0xd2, 0xfe, 0xa6, 0xc0, 0x99, 0x44, 0x8c, 0x63, 0x48, 0xb4, - 0xef, 0xc8, 0x89, 0xf6, 0xc2, 0x04, 0xe7, 0x4a, 0x49, 0xb3, 0x8f, 0xd2, 0x4e, 0xd5, 0xe4, 0xd7, - 0xa4, 0x67, 0xaf, 0x5f, 0xd5, 0x3e, 0xce, 0x4b, 0x6d, 0x37, 0x39, 0x8e, 0xfe, 0x44, 0xce, 0x28, - 0xb9, 0xb1, 0x32, 0xca, 0x50, 0xa2, 0xcd, 0x67, 0x4c, 0xb4, 0x84, 0x4c, 0x96, 0x68, 0x6f, 0xc1, - 0xbc, 0x5c, 0x7d, 0x0a, 0x63, 0x0e, 0x1c, 0x18, 0xe9, 0xa6, 0x54, 0x9d, 0x64, 0x4a, 0xea, 0x1b, - 0xb0, 0x4c, 0xa8, 0xdb, 0x6b, 0xd1, 0x9e, 0x8b, 0xcd, 0xc8, 0x8b, 0xf1, 0x09, 0x96, 0x4f, 0xca, - 0x83, 0x7e, 0x75, 0xb9, 0x99, 0xb0, 0x8f, 0x12, 0xb1, 0xe2, 0x9d, 0x33, 0x21, 0x4f, 0x73, 0xe7, - 0x4c, 0xd2, 0x3a, 0x99, 0x8f, 0xe4, 0xce, 0x39, 0x6a, 0xb5, 0x67, 0xa1, 0x73, 0x1e, 0xe1, 0x65, - 0x23, 0x3b, 0x67, 0x9a, 0x30, 0x38, 0xe0, 0x55, 0xed, 0x88, 0xb2, 0x19, 0x9f, 0x0f, 0x64, 0x9a, - 0x1c, 0xbc, 0x0d, 0xd3, 0xb7, 0xd9, 0x9b, 0xe6, 0x98, 0x7d, 0xb7, 0x7f, 0x50, 0xfe, 0x10, 0xaa, - 0x2f, 0x08, 0x56, 0xd3, 0xfc, 0x9b, 0x20, 0x9f, 0x5a, 0xbc, 0xd3, 0x8e, 0x6a, 0xe5, 0x69, 0xee, - 0xb4, 0xa3, 0x72, 0xa6, 0xf8, 0xe7, 0x9f, 0xe5, 0x4e, 0x3b, 0xd1, 0xde, 0xc7, 0xdf, 0x69, 0x7b, - 0x37, 0x2f, 0xef, 0x2f, 0xe9, 0x1a, 0x2d, 0xff, 0x86, 0x1e, 0xdc, 0xbc, 0x6e, 0xf8, 0x1b, 0x28, - 0x84, 0xd1, 0x3e, 0x56, 0xa0, 0x24, 0x9b, 
0x73, 0xa2, 0x46, 0xef, 0x81, 0x02, 0x4b, 0xae, 0x44, - 0x26, 0x3a, 0xc0, 0x3b, 0x9f, 0xc5, 0x9d, 0xf8, 0xf8, 0xee, 0x39, 0xc1, 0x70, 0x29, 0x61, 0x13, - 0x25, 0xb1, 0xd2, 0x7e, 0xa8, 0x40, 0x12, 0xb0, 0x6a, 0xa7, 0x4c, 0x5f, 0x37, 0xb3, 0x3c, 0x1d, - 0x0b, 0x4f, 0x1f, 0x67, 0xe6, 0xfa, 0xcf, 0x88, 0x46, 0xf9, 0xc0, 0x7a, 0x22, 0x8d, 0xd6, 0xa0, - 0xc0, 0xc2, 0x22, 0xe6, 0x0d, 0x5b, 0x06, 0x35, 0x10, 0xdb, 0x51, 0x5d, 0x28, 0x85, 0x05, 0xc0, - 0x5b, 0x67, 0x05, 0xe3, 0xc8, 0x27, 0xdf, 0xb0, 0x94, 0xc4, 0xe6, 0xef, 0xec, 0x70, 0x4d, 0x89, - 0x22, 0x8a, 0x71, 0xd0, 0x3e, 0x50, 0xc2, 0x36, 0x81, 0xab, 0xf7, 0x6e, 0x8a, 0x7a, 0x33, 0x8d, - 0x27, 0x82, 0x1f, 0x63, 0x69, 0xf8, 0x27, 0x39, 0x58, 0x88, 0xcd, 0x2e, 0x13, 0x27, 0xae, 0xca, - 0x93, 0x9e, 0xb8, 0xfe, 0x40, 0x81, 0x65, 0x57, 0x16, 0x24, 0xea, 0xf6, 0x9b, 0x99, 0xc6, 0xaf, - 0xdc, 0xef, 0xd7, 0x04, 0xfb, 0xe5, 0xa4, 0x5d, 0x94, 0xc8, 0x4d, 0xfb, 0x91, 0x02, 0x89, 0xe0, - 0xaa, 0x93, 0x62, 0x9b, 0x0b, 0xd9, 0x6c, 0xc3, 0xa7, 0xc3, 0xe3, 0x58, 0xe6, 0x4f, 0x91, 0xc7, - 0x5b, 0x3e, 0x2f, 0x79, 0xf2, 0xb5, 0x7a, 0x03, 0x8a, 0xb6, 0x63, 0xe2, 0x48, 0x0f, 0x19, 0x24, - 0xd9, 0x1b, 0x62, 0x1d, 0x05, 0x10, 0xb1, 0x50, 0xcc, 0x8f, 0x15, 0x8a, 0x07, 0x30, 0xef, 0x46, - 0x7d, 0x5e, 0xb4, 0x7e, 0x63, 0x76, 0x39, 0xdc, 0xae, 0x2b, 0x82, 0x87, 0x1c, 0x3d, 0x48, 0x26, - 0x2c, 0xf5, 0x6e, 0x4c, 0x7f, 0x4f, 0x6d, 0xef, 0xc6, 0x27, 0xad, 0xc9, 0xb5, 0xf1, 0x0f, 0x79, - 0x28, 0xa7, 0x65, 0x19, 0xf5, 0x03, 0x05, 0x56, 0x78, 0x20, 0xc5, 0xca, 0xe6, 0x64, 0xe1, 0x1a, - 0xdc, 0xb6, 0x77, 0x93, 0x68, 0xa2, 0x64, 0x56, 0xb2, 0x10, 0xd1, 0xa7, 0x97, 0xc9, 0xfe, 0x4b, - 0x63, 0x58, 0x08, 0xe9, 0x39, 0x27, 0x99, 0x95, 0xe4, 0xb8, 0x85, 0x23, 0x1d, 0xf7, 0xbb, 0x30, - 0xed, 0xb2, 0x07, 0x11, 0xef, 0x5e, 0x30, 0xc6, 0xe8, 0x33, 0xf9, 0xdf, 0x7e, 0xc2, 0x5e, 0x8d, - 0x7f, 0x13, 0xe4, 0x53, 0xd5, 0x7e, 0xa7, 0xc0, 0x50, 0xce, 0x9b, 0xa8, 0x72, 0x19, 0x00, 0xdd, - 0xff, 0x53, 0xa1, 0x01, 0x8b, 0x88, 0x16, 0x23, 0x44, 0x75, 0xfd, 0xe1, 0xe3, 0xca, 0xd4, 0xa3, - 0xc7, 0x95, 0xa9, 0x4f, 0x1e, 0x57, 0xa6, 0x1e, 0x0c, 0x2a, 0xca, 0xc3, 0x41, 0x45, 0x79, 0x34, - 0xa8, 0x28, 0x9f, 0x0c, 0x2a, 0xca, 0xbf, 0x06, 0x15, 0xe5, 0xc3, 0x7f, 0x57, 0xa6, 0xde, 0x5d, - 0x1b, 0xf5, 0x0f, 0x82, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x94, 0xb7, 0xe5, 0x3f, 0x28, - 0x00, 0x00, + return len(dAtA) - i, nil } -func (m *AllocationResult) Marshal() (dAtA []byte, err error) { +func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1485,54 +1377,25 @@ func (m *AllocationResult) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { +func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) i-- - if m.Shareable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - if m.AvailableOnNodes != nil { - { - size, err := m.AvailableOnNodes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ResourceHandles) > 0 { - for 
iNdEx := len(m.ResourceHandles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceHandles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *AllocationResultModel) Marshal() (dAtA []byte, err error) { +func (m *Device) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1542,19 +1405,19 @@ func (m *AllocationResultModel) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AllocationResultModel) MarshalTo(dAtA []byte) (int, error) { +func (m *Device) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AllocationResultModel) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.NamedResources != nil { + if m.Basic != nil { { - size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1562,12 +1425,17 @@ func (m *AllocationResultModel) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *DriverAllocationResult) Marshal() (dAtA []byte, err error) { +func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1577,18 +1445,18 @@ func (m *DriverAllocationResult) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DriverAllocationResult) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DriverAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { - size, err := m.AllocationResultModel.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1596,21 +1464,25 @@ func (m *DriverAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - { - size, err := m.VendorRequestParameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0x1a + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *DriverRequests) Marshal() (dAtA []byte, err error) { +func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1620,20 +1492,20 @@ func (m 
*DriverRequests) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DriverRequests) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DriverRequests) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Requests) > 0 { - for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1641,28 +1513,27 @@ func (m *DriverRequests) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } } - { - size, err := m.VendorParameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *NamedResourcesAllocationResult) Marshal() (dAtA []byte, err error) { +func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1672,78 +1543,12 @@ func (m *NamedResourcesAllocationResult) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NamedResourcesAllocationResult) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NamedResourcesAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NamedResourcesAttribute) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesAttribute) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.NamedResourcesAttributeValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NamedResourcesAttributeValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return 
nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesAttributeValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesAttributeValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1753,55 +1558,14 @@ func (m *NamedResourcesAttributeValue) MarshalToSizedBuffer(dAtA []byte) (int, e copy(dAtA[i:], *m.VersionValue) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue))) i-- - dAtA[i] = 0x52 - } - if m.StringSliceValue != nil { - { - size, err := m.StringSliceValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.IntSliceValue != nil { - { - size, err := m.IntSliceValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.IntValue != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue)) - i-- - dAtA[i] = 0x38 - } - if m.QuantityValue != nil { - { - size, err := m.QuantityValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + dAtA[i] = 0x2a } if m.StringValue != nil { i -= len(*m.StringValue) copy(dAtA[i:], *m.StringValue) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue))) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x22 } if m.BoolValue != nil { i-- @@ -1811,40 +1575,17 @@ func (m *NamedResourcesAttributeValue) MarshalToSizedBuffer(dAtA []byte) (int, e dAtA[i] = 0 } i-- - dAtA[i] = 0x10 + dAtA[i] = 0x18 } - return len(dAtA) - i, nil -} - -func (m *NamedResourcesFilter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.IntValue != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue)) + i-- + dAtA[i] = 0x10 } - return dAtA[:n], nil -} - -func (m *NamedResourcesFilter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Selector) - copy(dAtA[i:], m.Selector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *NamedResourcesInstance) Marshal() (dAtA []byte, err error) { +func (m *DeviceClaim) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1854,20 +1595,20 @@ func (m *NamedResourcesInstance) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NamedResourcesInstance) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NamedResourcesInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { { - size, err := 
m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1875,99 +1616,27 @@ func (m *NamedResourcesInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NamedResourcesIntSlice) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesIntSlice) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesIntSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ints) > 0 { - for iNdEx := len(m.Ints) - 1; iNdEx >= 0; iNdEx-- { - i = encodeVarintGenerated(dAtA, i, uint64(m.Ints[iNdEx])) + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0x12 } } - return len(dAtA) - i, nil -} - -func (m *NamedResourcesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Selector) - copy(dAtA[i:], m.Selector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NamedResourcesResources) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesResources) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Instances) > 0 { - for iNdEx := len(m.Instances) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Instances[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1981,7 +1650,7 @@ func (m *NamedResourcesResources) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *NamedResourcesStringSlice) Marshal() (dAtA []byte, err error) { +func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1991,21 +1660,31 @@ func (m *NamedResourcesStringSlice) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NamedResourcesStringSlice) MarshalTo(dAtA []byte) (int, error) { 
+func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NamedResourcesStringSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Strings) > 0 { - for iNdEx := len(m.Strings) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Strings[iNdEx]) - copy(dAtA[i:], m.Strings[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strings[iNdEx]))) + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) i-- dAtA[i] = 0xa } @@ -2013,7 +1692,7 @@ func (m *NamedResourcesStringSlice) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { +func (m *DeviceClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2023,18 +1702,18 @@ func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2042,9 +1721,9 @@ func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2052,9 +1731,32 @@ func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2066,7 +1768,7 @@ func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { +func (m *DeviceClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) @@ -2076,12 +1778,12 @@ func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2113,7 +1815,7 @@ func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { +func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2123,34 +1825,60 @@ func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.PotentialNodes) > 0 { - for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PotentialNodes[iNdEx]) - copy(dAtA[i:], m.PotentialNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PotentialNodes[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.SuitableNodes != nil { + { + size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - i -= len(m.SelectedNode) - copy(dAtA[i:], m.SelectedNode) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { +func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2160,26 +1888,63 @@ func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ResourceClaims) > 0 { - for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Opaque != nil { + { + size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchAttribute != nil { + i -= len(*m.MatchAttribute) + copy(dAtA[i:], *m.MatchAttribute) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute))) + i-- + dAtA[i] = 0x12 + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) i-- dAtA[i] = 0xa } @@ -2187,7 +1952,7 @@ func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { +func (m *DeviceRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2197,50 +1962,60 @@ func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + if m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x28 + i -= len(m.AllocationMode) + copy(dAtA[i:], m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x22 + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i -= len(m.DeviceClassName) + copy(dAtA[i:], m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName))) i-- dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } 
-func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { +func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2250,40 +2025,40 @@ func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x2a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) i-- dAtA[i] = 0x22 - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) i-- dAtA[i] = 0x1a - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0x12 + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { +func (m *DeviceSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2293,32 +2068,53 @@ func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.CEL != nil { + { + size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } + return len(dAtA) - i, nil +} + +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l { - size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2326,11 +2122,16 @@ func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceClaimParameters) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2340,50 +2141,36 @@ func (m *ResourceClaimParameters) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimParameters) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.DriverRequests) > 0 { - for iNdEx := len(m.DriverRequests) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DriverRequests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - if m.Shareable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - if m.GeneratedFrom != nil { - { - size, err := m.GeneratedFrom.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -2397,7 +2184,7 @@ func (m *ResourceClaimParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *ResourceClaimParametersList) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2407,12 +2194,12 @@ func (m *ResourceClaimParametersList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimParametersList) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimParametersList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2444,7 +2231,7 @@ func (m *ResourceClaimParametersList) MarshalToSizedBuffer(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *ResourceClaimParametersReference) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2454,35 
+2241,34 @@ func (m *ResourceClaimParametersReference) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimParametersReference) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimParametersReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + if len(m.PotentialNodes) > 0 { + for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PotentialNodes[iNdEx]) + copy(dAtA[i:], m.PotentialNodes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PotentialNodes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.SelectedNode) + copy(dAtA[i:], m.SelectedNode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2492,7 +2278,187 @@ func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceClaims) > 0 { + for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimConsumerReference) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } @@ -2539,26 +2505,19 @@ func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - i -= len(m.AllocationMode) - copy(dAtA[i:], m.AllocationMode) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i -= len(m.Controller) + copy(dAtA[i:], m.Controller) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) i-- - dAtA[i] = 0x1a - if m.ParametersRef != nil { - { - size, err := m.ParametersRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x12 + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= len(m.ResourceClassName) - copy(dAtA[i:], m.ResourceClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceClassName))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -2591,7 +2550,7 @@ func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 + dAtA[i] = 0x18 if len(m.ReservedFor) > 0 { 
for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { { @@ -2603,7 +2562,7 @@ func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } } if m.Allocation != nil { @@ -2616,13 +2575,8 @@ func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa } - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -2759,7 +2713,7 @@ func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *ResourceClass) Marshal() (dAtA []byte, err error) { +func (m *ResourcePool) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2769,69 +2723,31 @@ func (m *ResourceClass) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClass) MarshalTo(dAtA []byte) (int, error) { +func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.StructuredParameters != nil { - i-- - if *m.StructuredParameters { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.SuitableNodes != nil { - { - size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.ParametersRef != nil { - { - size, err := m.ParametersRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount)) i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceClassList) Marshal() (dAtA []byte, err error) { +func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2841,32 +2757,18 @@ func (m *ResourceClassList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClassList) MarshalTo(dAtA []byte) (int, error) { +func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != 
nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2874,70 +2776,7 @@ func (m *ResourceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClassParameters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClassParameters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClassParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Filters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.VendorParameters) > 0 { - for iNdEx := len(m.VendorParameters) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VendorParameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.GeneratedFrom != nil { - { - size, err := m.GeneratedFrom.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -2951,7 +2790,7 @@ func (m *ResourceClassParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *ResourceClassParametersList) Marshal() (dAtA []byte, err error) { +func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2961,12 +2800,12 @@ func (m *ResourceClassParametersList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClassParametersList) MarshalTo(dAtA []byte) (int, error) { +func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClassParametersList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2998,7 +2837,7 @@ func (m *ResourceClassParametersList) MarshalToSizedBuffer(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *ResourceClassParametersReference) Marshal() (dAtA []byte, err error) { +func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3008,61 +2847,57 @@ func (m *ResourceClassParametersReference) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClassParametersReference) MarshalTo(dAtA []byte) (int, error) { +func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClassParametersReference) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x22 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } i-- - dAtA[i] = 0x12 - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + if m.AllNodes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceFilter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + dAtA[i] = 0x28 + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - return dAtA[:n], nil -} - -func (m *ResourceFilter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a { - size, err := m.ResourceFilterModel.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3071,625 +2906,366 @@ func (m *ResourceFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceFilterModel) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil + dAtA[offset] = uint8(v) + return base } - -func (m *ResourceFilterModel) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *AllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Controller) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ResourceFilterModel) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *BasicDevice) Size() (n int) { + if m == nil 
{ + return 0 + } var l int _ = l - if m.NamedResources != nil { - { - size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n } -func (m *ResourceHandle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *CELDeviceSelector) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ResourceHandle) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Device) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Basic != nil { + l = m.Basic.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *ResourceHandle) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeviceAllocationConfiguration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.StructuredData != nil { - { - size, err := m.StructuredData.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.Source) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x2a } - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ResourceModel) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeviceAllocationResult) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ResourceModel) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceModel) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.NamedResources != nil { - { - size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n } -func (m *ResourceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeviceAttribute) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ResourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + var l int + _ = l + if m.IntValue != nil { + n += 1 + sovGenerated(uint64(*m.IntValue)) + } + if m.BoolValue != nil { + n += 2 + } + if m.StringValue != nil { + l = len(*m.StringValue) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VersionValue != nil { + l = len(*m.VersionValue) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *ResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeviceClaim) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - { - size, err := m.ResourceRequestModel.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 - { - size, err := m.VendorParameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceRequestModel) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *ResourceRequestModel) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return n } -func (m *ResourceRequestModel) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeviceClaimConfiguration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.NamedResources != nil { - { - size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeviceClass) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DeviceClassConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + 
return n } -func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeviceClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - { - size, err := m.ResourceModel.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0x1a - i -= len(m.NodeName) - copy(dAtA[i:], m.NodeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeviceClassSpec) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.SuitableNodes != nil { + l = m.SuitableNodes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *StructuredResourceHandle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeviceConfiguration) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + if m.Opaque != nil { + l = m.Opaque.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *StructuredResourceHandle) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DeviceConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchAttribute != nil { + l = len(*m.MatchAttribute) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *StructuredResourceHandle) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeviceRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if 
len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeviceClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.NodeName) - copy(dAtA[i:], m.NodeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i-- - dAtA[i] = 0x22 - { - size, err := m.VendorClaimParameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.VendorClassParameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *VendorParameters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VendorParameters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VendorParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AllocationResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceHandles) > 0 { - for _, e := range m.ResourceHandles { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.AvailableOnNodes != nil { - l = m.AvailableOnNodes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n -} - -func (m *AllocationResultModel) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NamedResources != nil { - l = m.NamedResources.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *DriverAllocationResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.VendorRequestParameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.AllocationResultModel.Size() + l = len(m.AllocationMode) n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + n += 2 return n } -func (m *DriverRequests) Size() (n int) { +func (m *DeviceRequestAllocationResult) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.DriverName) + l = len(m.Request) n += 1 + l + sovGenerated(uint64(l)) - l = m.VendorParameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Requests) > 0 { - for _, e := range m.Requests { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m 
*NamedResourcesAllocationResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) + l = len(m.Driver) n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NamedResourcesAttribute) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) + l = len(m.Pool) n += 1 + l + sovGenerated(uint64(l)) - l = m.NamedResourcesAttributeValue.Size() + l = len(m.Device) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *NamedResourcesAttributeValue) Size() (n int) { +func (m *DeviceSelector) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.BoolValue != nil { - n += 2 - } - if m.StringValue != nil { - l = len(*m.StringValue) + if m.CEL != nil { + l = m.CEL.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.QuantityValue != nil { - l = m.QuantityValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.IntValue != nil { - n += 1 + sovGenerated(uint64(*m.IntValue)) - } - if m.IntSliceValue != nil { - l = m.IntSliceValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.StringSliceValue != nil { - l = m.StringSliceValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.VersionValue != nil { - l = len(*m.VersionValue) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *NamedResourcesFilter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Selector) - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *NamedResourcesInstance) Size() (n int) { +func (m *OpaqueDeviceConfiguration) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + l = len(m.Driver) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NamedResourcesIntSlice) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Ints) > 0 { - for _, e := range m.Ints { - n += 1 + sovGenerated(uint64(e)) - } - } - return n -} - -func (m *NamedResourcesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Selector) + l = m.Parameters.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *NamedResourcesResources) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Instances) > 0 { - for _, e := range m.Instances { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NamedResourcesStringSlice) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Strings) > 0 { - for _, s := range m.Strings { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *PodSchedulingContext) Size() (n int) { if m == nil { return 0 @@ -3803,60 +3379,6 @@ func (m *ResourceClaimList) Size() (n int) { return n } -func (m *ResourceClaimParameters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.GeneratedFrom != nil { - l = m.GeneratedFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if len(m.DriverRequests) > 0 { - for _, e := range m.DriverRequests { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimParametersList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimParametersReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *ResourceClaimSchedulingStatus) Size() (n int) { if m == nil { return 0 @@ -3880,13 +3402,9 @@ func (m *ResourceClaimSpec) Size() (n int) { } var l int _ = l - l = len(m.ResourceClassName) + l = m.Devices.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.ParametersRef != nil { - l = m.ParametersRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.AllocationMode) + l = len(m.Controller) n += 1 + l + sovGenerated(uint64(l)) return n } @@ -3897,8 +3415,6 @@ func (m *ResourceClaimStatus) Size() (n int) { } var l int _ = l - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) if m.Allocation != nil { l = m.Allocation.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -3956,187 +3472,16 @@ func (m *ResourceClaimTemplateSpec) Size() (n int) { return n } -func (m *ResourceClass) Size() (n int) { +func (m *ResourcePool) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DriverName) + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - if m.ParametersRef != nil { - l = m.ParametersRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SuitableNodes != nil { - l = m.SuitableNodes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.StructuredParameters != nil { - n += 2 - } - return n -} - -func (m *ResourceClassList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClassParameters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.GeneratedFrom != nil { - l = m.GeneratedFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.VendorParameters) > 0 { - for _, e := range m.VendorParameters { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Filters) > 0 { - for _, e := range m.Filters { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClassParametersList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClassParametersReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceFilter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ResourceFilterModel.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceFilterModel) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NamedResources != nil { - 
l = m.NamedResources.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceHandle) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - if m.StructuredData != nil { - l = m.StructuredData.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceModel) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NamedResources != nil { - l = m.NamedResources.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.VendorParameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.ResourceRequestModel.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceRequestModel) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NamedResources != nil { - l = m.NamedResources.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + n += 1 + sovGenerated(uint64(m.Generation)) + n += 1 + sovGenerated(uint64(m.ResourceSliceCount)) return n } @@ -4148,11 +3493,7 @@ func (m *ResourceSlice) Size() (n int) { _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ResourceModel.Size() + l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4174,20 +3515,25 @@ func (m *ResourceSliceList) Size() (n int) { return n } -func (m *StructuredResourceHandle) Size() (n int) { +func (m *ResourceSliceSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.VendorClassParameters.Size() + l = len(m.Driver) n += 1 + l + sovGenerated(uint64(l)) - l = m.VendorClaimParameters.Size() + l = m.Pool.Size() n += 1 + l + sovGenerated(uint64(l)) l = len(m.NodeName) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Results) > 0 { - for _, e := range m.Results { + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Devices) > 0 { + for _, e := range m.Devices { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -4195,19 +3541,6 @@ func (m *StructuredResourceHandle) Size() (n int) { return n } -func (m *VendorParameters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Parameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func sovGenerated(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -4218,161 +3551,280 @@ func (this *AllocationResult) String() string { if this == nil { return "nil" } - repeatedStringForResourceHandles := "[]ResourceHandle{" - for _, f := range this.ResourceHandles { - repeatedStringForResourceHandles += strings.Replace(strings.Replace(f.String(), "ResourceHandle", "ResourceHandle", 1), `&`, ``, 1) + "," - } - repeatedStringForResourceHandles += "}" s := strings.Join([]string{`&AllocationResult{`, - `ResourceHandles:` + repeatedStringForResourceHandles + `,`, - `AvailableOnNodes:` + strings.Replace(fmt.Sprintf("%v", this.AvailableOnNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, - `Shareable:` + fmt.Sprintf("%v", this.Shareable) + `,`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`, + 
`NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`, + `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, + `}`, + }, "") + return s +} +func (this *BasicDevice) String() string { + if this == nil { + return "nil" + } + keysForAttributes := make([]string, 0, len(this.Attributes)) + for k := range this.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + mapStringForAttributes := "map[QualifiedName]DeviceAttribute{" + for _, k := range keysForAttributes { + mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)]) + } + mapStringForAttributes += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "map[QualifiedName]resource.Quantity{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&BasicDevice{`, + `Attributes:` + mapStringForAttributes + `,`, + `Capacity:` + mapStringForCapacity + `,`, `}`, }, "") return s } -func (this *AllocationResultModel) String() string { +func (this *CELDeviceSelector) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&AllocationResultModel{`, - `NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesAllocationResult", "NamedResourcesAllocationResult", 1) + `,`, + s := strings.Join([]string{`&CELDeviceSelector{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, `}`, }, "") return s } -func (this *DriverAllocationResult) String() string { +func (this *Device) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&DriverAllocationResult{`, - `VendorRequestParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorRequestParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `AllocationResultModel:` + strings.Replace(strings.Replace(this.AllocationResultModel.String(), "AllocationResultModel", "AllocationResultModel", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&Device{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Basic:` + strings.Replace(this.Basic.String(), "BasicDevice", "BasicDevice", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAllocationConfiguration{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationResult) String() string { + if this == nil { + return "nil" + } + repeatedStringForResults := "[]DeviceRequestAllocationResult{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + "," + } + repeatedStringForResults += "}" + repeatedStringForConfig := "[]DeviceAllocationConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += 
strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceAllocationResult{`, + `Results:` + repeatedStringForResults + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAttribute) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAttribute{`, + `IntValue:` + valueToStringGenerated(this.IntValue) + `,`, + `BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`, + `StringValue:` + valueToStringGenerated(this.StringValue) + `,`, + `VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`, `}`, }, "") return s } -func (this *DriverRequests) String() string { +func (this *DeviceClaim) String() string { if this == nil { return "nil" } - repeatedStringForRequests := "[]ResourceRequest{" + repeatedStringForRequests := "[]DeviceRequest{" for _, f := range this.Requests { - repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "ResourceRequest", "ResourceRequest", 1), `&`, ``, 1) + "," + repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + "," } repeatedStringForRequests += "}" - s := strings.Join([]string{`&DriverRequests{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `VendorParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + repeatedStringForConstraints := "[]DeviceConstraint{" + for _, f := range this.Constraints { + repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForConstraints += "}" + repeatedStringForConfig := "[]DeviceClaimConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClaim{`, `Requests:` + repeatedStringForRequests + `,`, + `Constraints:` + repeatedStringForConstraints + `,`, + `Config:` + repeatedStringForConfig + `,`, `}`, }, "") return s } -func (this *NamedResourcesAllocationResult) String() string { +func (this *DeviceClaimConfiguration) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&NamedResourcesAllocationResult{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + s := strings.Join([]string{`&DeviceClaimConfiguration{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *NamedResourcesAttribute) String() string { +func (this *DeviceClass) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&NamedResourcesAttribute{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `NamedResourcesAttributeValue:` + strings.Replace(strings.Replace(this.NamedResourcesAttributeValue.String(), "NamedResourcesAttributeValue", "NamedResourcesAttributeValue", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&DeviceClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) 
+ `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *NamedResourcesAttributeValue) String() string { +func (this *DeviceClassConfiguration) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&NamedResourcesAttributeValue{`, - `BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`, - `StringValue:` + valueToStringGenerated(this.StringValue) + `,`, - `QuantityValue:` + strings.Replace(fmt.Sprintf("%v", this.QuantityValue), "Quantity", "resource.Quantity", 1) + `,`, - `IntValue:` + valueToStringGenerated(this.IntValue) + `,`, - `IntSliceValue:` + strings.Replace(this.IntSliceValue.String(), "NamedResourcesIntSlice", "NamedResourcesIntSlice", 1) + `,`, - `StringSliceValue:` + strings.Replace(this.StringSliceValue.String(), "NamedResourcesStringSlice", "NamedResourcesStringSlice", 1) + `,`, - `VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`, + s := strings.Join([]string{`&DeviceClassConfiguration{`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *NamedResourcesFilter) String() string { +func (this *DeviceClassList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&NamedResourcesFilter{`, - `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + repeatedStringForItems := "[]DeviceClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeviceClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *NamedResourcesInstance) String() string { +func (this *DeviceClassSpec) String() string { if this == nil { return "nil" } - repeatedStringForAttributes := "[]NamedResourcesAttribute{" - for _, f := range this.Attributes { - repeatedStringForAttributes += strings.Replace(strings.Replace(f.String(), "NamedResourcesAttribute", "NamedResourcesAttribute", 1), `&`, ``, 1) + "," + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," } - repeatedStringForAttributes += "}" - s := strings.Join([]string{`&NamedResourcesInstance{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Attributes:` + repeatedStringForAttributes + `,`, + repeatedStringForSelectors += "}" + repeatedStringForConfig := "[]DeviceClassConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClassSpec{`, + `Selectors:` + repeatedStringForSelectors + `,`, + `Config:` + repeatedStringForConfig + `,`, + `SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, `}`, }, "") return s } -func (this *NamedResourcesIntSlice) String() string { +func (this *DeviceConfiguration) String() string { if this == nil { return "nil" } - s := 
strings.Join([]string{`&NamedResourcesIntSlice{`, - `Ints:` + fmt.Sprintf("%v", this.Ints) + `,`, + s := strings.Join([]string{`&DeviceConfiguration{`, + `Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`, `}`, }, "") return s } -func (this *NamedResourcesRequest) String() string { +func (this *DeviceConstraint) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&NamedResourcesRequest{`, - `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + s := strings.Join([]string{`&DeviceConstraint{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`, `}`, }, "") return s } -func (this *NamedResourcesResources) String() string { +func (this *DeviceRequest) String() string { if this == nil { return "nil" } - repeatedStringForInstances := "[]NamedResourcesInstance{" - for _, f := range this.Instances { - repeatedStringForInstances += strings.Replace(strings.Replace(f.String(), "NamedResourcesInstance", "NamedResourcesInstance", 1), `&`, ``, 1) + "," + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&DeviceRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `AdminAccess:` + fmt.Sprintf("%v", this.AdminAccess) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequestAllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceRequestAllocationResult{`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceSelector) String() string { + if this == nil { + return "nil" } - repeatedStringForInstances += "}" - s := strings.Join([]string{`&NamedResourcesResources{`, - `Instances:` + repeatedStringForInstances + `,`, + s := strings.Join([]string{`&DeviceSelector{`, + `CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`, `}`, }, "") return s } -func (this *NamedResourcesStringSlice) String() string { +func (this *OpaqueDeviceConfiguration) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&NamedResourcesStringSlice{`, - `Strings:` + fmt.Sprintf("%v", this.Strings) + `,`, + s := strings.Join([]string{`&OpaqueDeviceConfiguration{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4472,52 +3924,6 @@ func (this *ResourceClaimList) String() string { }, "") return s } -func (this *ResourceClaimParameters) String() string { - if this == nil { - return "nil" - } - repeatedStringForDriverRequests := "[]DriverRequests{" - for _, f := range this.DriverRequests { - repeatedStringForDriverRequests += strings.Replace(strings.Replace(f.String(), "DriverRequests", 
"DriverRequests", 1), `&`, ``, 1) + "," - } - repeatedStringForDriverRequests += "}" - s := strings.Join([]string{`&ResourceClaimParameters{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `GeneratedFrom:` + strings.Replace(this.GeneratedFrom.String(), "ResourceClaimParametersReference", "ResourceClaimParametersReference", 1) + `,`, - `Shareable:` + fmt.Sprintf("%v", this.Shareable) + `,`, - `DriverRequests:` + repeatedStringForDriverRequests + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimParametersList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceClaimParameters{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimParameters", "ResourceClaimParameters", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceClaimParametersList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimParametersReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimParametersReference{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} func (this *ResourceClaimSchedulingStatus) String() string { if this == nil { return "nil" @@ -4534,9 +3940,8 @@ func (this *ResourceClaimSpec) String() string { return "nil" } s := strings.Join([]string{`&ResourceClaimSpec{`, - `ResourceClassName:` + fmt.Sprintf("%v", this.ResourceClassName) + `,`, - `ParametersRef:` + strings.Replace(this.ParametersRef.String(), "ResourceClaimParametersReference", "ResourceClaimParametersReference", 1) + `,`, - `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`, + `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, `}`, }, "") return s @@ -4551,7 +3956,6 @@ func (this *ResourceClaimStatus) String() string { } repeatedStringForReservedFor += "}" s := strings.Join([]string{`&ResourceClaimStatus{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`, `ReservedFor:` + repeatedStringForReservedFor + `,`, `DeallocationRequested:` + fmt.Sprintf("%v", this.DeallocationRequested) + `,`, @@ -4597,1496 +4001,74 @@ func (this *ResourceClaimTemplateSpec) String() string { }, "") return s } -func (this *ResourceClass) String() string { +func (this *ResourcePool) String() string { if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `ParametersRef:` + strings.Replace(this.ParametersRef.String(), "ResourceClassParametersReference", "ResourceClassParametersReference", 1) + `,`, - `SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, - `StructuredParameters:` + 
valueToStringGenerated(this.StructuredParameters) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClassList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceClass{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClass", "ResourceClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClassParameters) String() string { - if this == nil { - return "nil" - } - repeatedStringForVendorParameters := "[]VendorParameters{" - for _, f := range this.VendorParameters { - repeatedStringForVendorParameters += strings.Replace(strings.Replace(f.String(), "VendorParameters", "VendorParameters", 1), `&`, ``, 1) + "," - } - repeatedStringForVendorParameters += "}" - repeatedStringForFilters := "[]ResourceFilter{" - for _, f := range this.Filters { - repeatedStringForFilters += strings.Replace(strings.Replace(f.String(), "ResourceFilter", "ResourceFilter", 1), `&`, ``, 1) + "," - } - repeatedStringForFilters += "}" - s := strings.Join([]string{`&ResourceClassParameters{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `GeneratedFrom:` + strings.Replace(this.GeneratedFrom.String(), "ResourceClassParametersReference", "ResourceClassParametersReference", 1) + `,`, - `VendorParameters:` + repeatedStringForVendorParameters + `,`, - `Filters:` + repeatedStringForFilters + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClassParametersList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceClassParameters{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClassParameters", "ResourceClassParameters", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceClassParametersList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClassParametersReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClassParametersReference{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceFilter) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceFilter{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `ResourceFilterModel:` + strings.Replace(strings.Replace(this.ResourceFilterModel.String(), "ResourceFilterModel", "ResourceFilterModel", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceFilterModel) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceFilterModel{`, - `NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesFilter", "NamedResourcesFilter", 1) + `,`, - `}`, - }, "") - return s -} -func 
(this *ResourceHandle) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceHandle{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `StructuredData:` + strings.Replace(this.StructuredData.String(), "StructuredResourceHandle", "StructuredResourceHandle", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceModel) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceModel{`, - `NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesResources", "NamedResourcesResources", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceRequest{`, - `VendorParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `ResourceRequestModel:` + strings.Replace(strings.Replace(this.ResourceRequestModel.String(), "ResourceRequestModel", "ResourceRequestModel", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceRequestModel) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceRequestModel{`, - `NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesRequest", "NamedResourcesRequest", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceSlice) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceSlice{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `ResourceModel:` + strings.Replace(strings.Replace(this.ResourceModel.String(), "ResourceModel", "ResourceModel", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceSliceList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceSlice{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceSliceList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *StructuredResourceHandle) String() string { - if this == nil { - return "nil" - } - repeatedStringForResults := "[]DriverAllocationResult{" - for _, f := range this.Results { - repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DriverAllocationResult", "DriverAllocationResult", 1), `&`, ``, 1) + "," - } - repeatedStringForResults += "}" - s := strings.Join([]string{`&StructuredResourceHandle{`, - `VendorClassParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorClassParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `VendorClaimParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorClaimParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `Results:` + repeatedStringForResults + `,`, - `}`, - }, "") - return s -} -func 
(this *VendorParameters) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VendorParameters{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllocationResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceHandles = append(m.ResourceHandles, ResourceHandle{}) - if err := m.ResourceHandles[len(m.ResourceHandles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableOnNodes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AvailableOnNodes == nil { - m.AvailableOnNodes = &v1.NodeSelector{} - } - if err := m.AvailableOnNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shareable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Shareable = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} 
-func (m *AllocationResultModel) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllocationResultModel: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllocationResultModel: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamedResources == nil { - m.NamedResources = &NamedResourcesAllocationResult{} - } - if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DriverAllocationResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DriverAllocationResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DriverAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorRequestParameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.VendorRequestParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllocationResultModel", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.AllocationResultModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DriverRequests) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DriverRequests: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DriverRequests: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorParameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.VendorParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Requests = append(m.Requests, ResourceRequest{}) - if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesAllocationResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesAllocationResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesAttribute) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesAttribute: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesAttribute: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 
2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamedResourcesAttributeValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.NamedResourcesAttributeValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesAttributeValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesAttributeValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesAttributeValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.BoolValue = &b - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.StringValue = &s - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QuantityValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.QuantityValue == nil { - m.QuantityValue = &resource.Quantity{} - } - if 
err := m.QuantityValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IntValue = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IntSliceValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.IntSliceValue == nil { - m.IntSliceValue = &NamedResourcesIntSlice{} - } - if err := m.IntSliceValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringSliceValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StringSliceValue == nil { - m.StringSliceValue = &NamedResourcesStringSlice{} - } - if err := m.StringSliceValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.VersionValue = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesFilter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - 
if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesFilter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesFilter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Selector = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesInstance) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesInstance: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesInstance: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, NamedResourcesAttribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesIntSlice) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesIntSlice: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesIntSlice: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType == 0 { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Ints = append(m.Ints, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Ints) == 0 { - m.Ints = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Ints = append(m.Ints, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Ints", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&ResourcePool{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSlice) String() string { + if this == nil { + return "nil" } - return nil + s := strings.Join([]string{`&ResourceSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceList) String() string { + if this == nil { + return "nil" + } + 
repeatedStringForItems := "[]ResourceSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDevices := "[]Device{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += "}" + s := strings.Join([]string{`&ResourceSliceSpec{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`, + `AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`, + `Devices:` + repeatedStringForDevices + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } -func (m *NamedResourcesRequest) Unmarshal(dAtA []byte) error { +func (m *AllocationResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6109,15 +4091,84 @@ func (m *NamedResourcesRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v1.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6145,7 +4196,7 @@ func (m *NamedResourcesRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Selector = string(dAtA[iNdEx:postIndex]) + m.Controller = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -6168,7 +4219,7 @@ func (m *NamedResourcesRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *NamedResourcesResources) Unmarshal(dAtA []byte) error { +func (m *BasicDevice) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6191,15 +4242,15 @@ func (m *NamedResourcesResources) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesResources: wiretype end group for non-group") + return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesResources: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Instances", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6226,10 +4277,234 @@ func (m *NamedResourcesResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Instances = append(m.Instances, NamedResourcesInstance{}) - if err := m.Instances[len(m.Instances)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Attributes == nil { + m.Attributes = make(map[QualifiedName]DeviceAttribute) + } + var mapkey QualifiedName + mapvalue := &DeviceAttribute{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return 
ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceAttribute{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(map[QualifiedName]resource.Quantity) + } + var mapkey QualifiedName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Capacity[QualifiedName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -6252,7 +4527,7 @@ func (m *NamedResourcesResources) Unmarshal(dAtA 
[]byte) error { } return nil } -func (m *NamedResourcesStringSlice) Unmarshal(dAtA []byte) error { +func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6275,15 +4550,15 @@ func (m *NamedResourcesStringSlice) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesStringSlice: wiretype end group for non-group") + return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesStringSlice: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strings", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6311,7 +4586,7 @@ func (m *NamedResourcesStringSlice) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Strings = append(m.Strings, string(dAtA[iNdEx:postIndex])) + m.Expression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -6334,7 +4609,7 @@ func (m *NamedResourcesStringSlice) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { +func (m *Device) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6357,17 +4632,17 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group") + return fmt.Errorf("proto: Device: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6377,28 +4652,27 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6425,40 +4699,10 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.Basic == nil { + m.Basic = &BasicDevice{} } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6483,7 +4727,7 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6506,17 +4750,17 @@ func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6526,28 +4770,59 @@ func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) } var msglen int for shift := uint(0); ; 
shift += 7 { @@ -6574,8 +4849,7 @@ func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, PodSchedulingContext{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6600,7 +4874,7 @@ func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6623,17 +4897,17 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6643,29 +4917,31 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.SelectedNode = string(dAtA[iNdEx:postIndex]) + m.Results = append(m.Results, DeviceRequestAllocationResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6675,23 +4951,25 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex])) + m.Config = append(m.Config, DeviceAllocationConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -6714,7 +4992,7 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingContextStatus) Unmarshal(dAtA 
[]byte) error { +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6737,17 +5015,17 @@ func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) } - var msglen int + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6757,81 +5035,17 @@ func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{}) - if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaim) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + m.IntValue = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6841,30 +5055,18 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 
2: + b := bool(v != 0) + m.BoolValue = &b + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6874,30 +5076,30 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.StringValue = &s iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6907,24 +5109,24 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.VersionValue = &s iNdEx = postIndex default: iNdEx = preIndex @@ -6947,7 +5149,7 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { +func (m *DeviceClaim) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6970,17 +5172,17 @@ func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6990,61 +5192,31 @@ func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 
{ return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Requests = append(m.Requests, DeviceRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7054,29 +5226,31 @@ func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Constraints = append(m.Constraints, DeviceConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7086,23 +5260,25 @@ func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + m.Config = append(m.Config, DeviceClaimConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -7125,7 +5301,7 @@ func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7148,17 +5324,17 @@ func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if 
wireType == 4 { - return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7168,28 +5344,27 @@ func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7216,8 +5391,7 @@ func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ResourceClaim{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7242,7 +5416,7 @@ func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimParameters) Unmarshal(dAtA []byte) error { +func (m *DeviceClass) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7265,10 +5439,10 @@ func (m *ResourceClaimParameters) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimParameters: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimParameters: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -7306,7 +5480,7 @@ func (m *ResourceClaimParameters) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GeneratedFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7333,36 +5507,63 @@ func (m *ResourceClaimParameters) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.GeneratedFrom == nil { - m.GeneratedFrom = &ResourceClaimParametersReference{} - } - if err := m.GeneratedFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return 
err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shareable", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.Shareable = bool(v != 0) - case 4: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverRequests", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7389,8 +5590,7 @@ func (m *ResourceClaimParameters) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DriverRequests = append(m.DriverRequests, DriverRequests{}) - if err := m.DriverRequests[len(m.DriverRequests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7415,7 +5615,7 @@ func (m *ResourceClaimParameters) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimParametersList) Unmarshal(dAtA []byte) error { +func (m *DeviceClassList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7438,10 +5638,10 @@ func (m *ResourceClaimParametersList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimParametersList: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimParametersList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -7506,7 +5706,7 @@ func (m *ResourceClaimParametersList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ResourceClaimParameters{}) + m.Items = append(m.Items, DeviceClass{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -7532,7 +5732,7 @@ func (m *ResourceClaimParametersList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimParametersReference) Unmarshal(dAtA []byte) error { +func (m *DeviceClassSpec) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7555,17 +5755,17 @@ func (m *ResourceClaimParametersReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimParametersReference: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7575,29 +5775,31 @@ func (m *ResourceClaimParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroup = string(dAtA[iNdEx:postIndex]) + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7607,29 +5809,117 @@ func (m *ResourceClaimParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClassConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SuitableNodes == nil { + m.SuitableNodes = &v1.NodeSelector{} + } + if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) 
+ if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7639,23 +5929,27 @@ func (m *ResourceClaimParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.Opaque == nil { + m.Opaque = &OpaqueDeviceConfiguration{} + } + if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -7678,7 +5972,7 @@ func (m *ResourceClaimParametersReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7701,15 +5995,15 @@ func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7737,11 +6031,11 @@ func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
UnsuitableNodes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7769,7 +6063,8 @@ func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex])) + s := FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.MatchAttribute = &s iNdEx = postIndex default: iNdEx = preIndex @@ -7792,7 +6087,7 @@ func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { +func (m *DeviceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7815,15 +6110,15 @@ func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7851,11 +6146,43 @@ func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceClassName = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParametersRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeviceClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7882,14 +6209,12 @@ func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ParametersRef == nil { - m.ParametersRef = &ResourceClaimParametersReference{} - } - if err := m.ParametersRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) } @@ -7919,8 +6244,47 @@ func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllocationMode = AllocationMode(dAtA[iNdEx:postIndex]) 
+ m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AdminAccess = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7942,7 +6306,7 @@ func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { +func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7965,15 +6329,15 @@ func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8001,13 +6365,13 @@ func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DriverName = string(dAtA[iNdEx:postIndex]) + m.Request = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8017,33 +6381,61 @@ func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Allocation == nil { - m.Allocation = &AllocationResult{} + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) } - if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8053,31 +6445,79 @@ func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{}) - if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType) + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - var v int + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8087,12 +6527,28 @@ func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.DeallocationRequested = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CEL == nil { + m.CEL = &CELDeviceSelector{} + } + if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -8114,7 +6570,7 @@ func (m *ResourceClaimStatus) 
Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { +func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8137,17 +6593,17 @@ func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group") + return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8157,28 +6613,27 @@ func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8205,7 +6660,7 @@ func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8230,7 +6685,7 @@ func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8253,15 +6708,15 @@ func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8288,13 +6743,13 @@ func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8321,8 +6776,40 @@ func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ResourceClaimTemplate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8347,7 +6834,7 @@ func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8370,15 +6857,15 @@ func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8405,13 +6892,13 @@ func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8438,7 +6925,8 @@ func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, PodSchedulingContext{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8463,7 +6951,7 @@ func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { 
} return nil } -func (m *ResourceClass) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8486,17 +6974,17 @@ func (m *ResourceClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClass: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8506,28 +6994,27 @@ func (m *ResourceClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SelectedNode = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8555,47 +7042,61 @@ func (m *ResourceClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DriverName = string(dAtA[iNdEx:postIndex]) + m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParametersRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.ParametersRef == nil { - m.ParametersRef = &ResourceClassParametersReference{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.ParametersRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 4: + b 
:= dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8622,34 +7123,11 @@ func (m *ResourceClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SuitableNodes == nil { - m.SuitableNodes = &v1.NodeSelector{} - } - if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{}) + if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StructuredParameters", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.StructuredParameters = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -8671,7 +7149,7 @@ func (m *ResourceClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClassList) Unmarshal(dAtA []byte) error { +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8694,15 +7172,15 @@ func (m *ResourceClassList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClassList: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClassList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8729,13 +7207,13 @@ func (m *ResourceClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8762,8 +7240,40 @@ func (m *ResourceClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ResourceClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8788,7 +7298,7 @@ func (m *ResourceClassList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClassParameters) Unmarshal(dAtA []byte) error { +func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8811,17 +7321,17 @@ func (m *ResourceClassParameters) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClassParameters: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClassParameters: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8831,30 +7341,29 @@ func (m *ResourceClassParameters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.APIGroup = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GeneratedFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8864,33 +7373,29 @@ func (m *ResourceClassParameters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.GeneratedFrom == nil { - m.GeneratedFrom = &ResourceClassParametersReference{} - } - if err := m.GeneratedFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Resource = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorParameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8900,31 +7405,29 @@ func (m *ResourceClassParameters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.VendorParameters = append(m.VendorParameters, VendorParameters{}) - if err := m.VendorParameters[len(m.VendorParameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8934,25 +7437,23 @@ func (m *ResourceClassParameters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Filters = append(m.Filters, ResourceFilter{}) - if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -8975,7 +7476,7 @@ func (m *ResourceClassParameters) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClassParametersList) Unmarshal(dAtA []byte) error { +func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8998,10 +7499,10 @@ func (m *ResourceClassParametersList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClassParametersList: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClassParametersList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -9066,7 +7567,7 @@ func (m *ResourceClassParametersList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ResourceClassParameters{}) + m.Items = append(m.Items, ResourceClaim{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9092,7 +7593,7 @@ func (m *ResourceClassParametersList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClassParametersReference) Unmarshal(dAtA 
[]byte) error { +func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9115,77 +7616,13 @@ func (m *ResourceClassParametersReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClassParametersReference: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } @@ -9217,9 +7654,9 @@ func (m *ResourceClassParametersReference) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UnsuitableNodes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9247,7 +7684,7 @@ func (m *ResourceClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -9270,7 +7707,7 @@ func (m *ResourceClassParametersReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceFilter) Unmarshal(dAtA []byte) error { +func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9293,17 +7730,17 @@ func (m *ResourceFilter) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceFilter: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group") 
} if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceFilter: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9313,29 +7750,30 @@ func (m *ResourceFilter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DriverName = string(dAtA[iNdEx:postIndex]) + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFilterModel", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9345,24 +7783,23 @@ func (m *ResourceFilter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ResourceFilterModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Controller = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -9385,7 +7822,7 @@ func (m *ResourceFilter) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceFilterModel) Unmarshal(dAtA []byte) error { +func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9408,15 +7845,15 @@ func (m *ResourceFilterModel) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceFilterModel: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceFilterModel: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9443,13 +7880,67 @@ func (m *ResourceFilterModel) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamedResources == nil { - m.NamedResources = &NamedResourcesFilter{} + if m.Allocation == nil { + m.Allocation = &AllocationResult{} + } + if err := 
m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } - if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{}) + if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeallocationRequested = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9471,7 +7962,7 @@ func (m *ResourceFilterModel) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceHandle) Unmarshal(dAtA []byte) error { +func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9494,49 +7985,17 @@ func (m *ResourceHandle) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceHandle: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceHandle: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9546,27 +8005,28 @@ func (m *ResourceHandle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return 
ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StructuredData", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9593,10 +8053,7 @@ func (m *ResourceHandle) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.StructuredData == nil { - m.StructuredData = &StructuredResourceHandle{} - } - if err := m.StructuredData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9621,7 +8078,7 @@ func (m *ResourceHandle) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceModel) Unmarshal(dAtA []byte) error { +func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9644,15 +8101,15 @@ func (m *ResourceModel) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceModel: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceModel: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9679,10 +8136,41 @@ func (m *ResourceModel) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamedResources == nil { - m.NamedResources = &NamedResourcesResources{} + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } - if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ResourceClaimTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9707,7 +8195,7 @@ func (m *ResourceModel) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceRequest) Unmarshal(dAtA []byte) error { +func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9730,15 +8218,15 @@ func (m *ResourceRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := 
int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorParameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9765,13 +8253,13 @@ func (m *ResourceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.VendorParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceRequestModel", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9798,7 +8286,7 @@ func (m *ResourceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ResourceRequestModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9823,7 +8311,7 @@ func (m *ResourceRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceRequestModel) Unmarshal(dAtA []byte) error { +func (m *ResourcePool) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9846,17 +8334,17 @@ func (m *ResourceRequestModel) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceRequestModel: wiretype end group for non-group") + return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceRequestModel: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9866,28 +8354,62 @@ func (m *ResourceRequestModel) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamedResources == nil { - m.NamedResources = &NamedResourcesRequest{} + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) } - if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Generation = 0 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType) + } + m.ResourceSliceCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResourceSliceCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9973,71 +8495,7 @@ func (m *ResourceSlice) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceModel", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10064,7 +8522,7 @@ func (m *ResourceSlice) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ResourceModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10206,7 +8664,7 @@ func (m *ResourceSliceList) Unmarshal(dAtA []byte) error { } return nil } -func (m *StructuredResourceHandle) Unmarshal(dAtA []byte) error { +func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10229,17 +8687,17 @@ func (m *StructuredResourceHandle) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StructuredResourceHandle: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StructuredResourceHandle: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, 
wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorClassParameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10249,28 +8707,27 @@ func (m *StructuredResourceHandle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.VendorClassParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorClaimParameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10297,11 +8754,11 @@ func (m *StructuredResourceHandle) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.VendorClaimParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) } @@ -10333,9 +8790,9 @@ func (m *StructuredResourceHandle) Unmarshal(dAtA []byte) error { } m.NodeName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10362,66 +8819,18 @@ func (m *StructuredResourceHandle) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, DriverAllocationResult{}) - if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.NodeSelector == nil { + m.NodeSelector = &v1.NodeSelector{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VendorParameters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VendorParameters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VendorParameters: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10431,27 +8840,15 @@ func (m *VendorParameters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + m.AllNodes = bool(v != 0) + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10478,7 +8875,8 @@ func (m *VendorParameters) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Devices = append(m.Devices, Device{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto new file mode 100644 index 0000000000..b4428ad452 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto @@ -0,0 +1,912 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.resource.v1alpha3; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/resource/v1alpha3"; + +// AllocationResult contains attributes of an allocated resource. +message AllocationResult { + // Devices is the result of allocating devices. + // + // +optional + optional DeviceAllocationResult devices = 1; + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. + // + // +optional + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3; + + // Controller is the name of the DRA driver which handled the + // allocation. That driver is also responsible for deallocating the + // claim. 
It is empty when the claim can be deallocated without + // involving a driver. + // + // A driver may allocate devices provided by other drivers, so this + // driver name here can be different from the driver names listed for + // the results. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional string controller = 4; +} + +// BasicDevice defines one device instance. +message BasicDevice { + // Attributes defines the set of attributes for this device. + // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map attributes = 1; + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map capacity = 2; +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +message CELDeviceSelector { + // Expression is a CEL expression which evaluates a single device. It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. 
For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // +required + optional string expression = 1; +} + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +message Device { + // Name is unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + optional string name = 1; + + // Basic defines one device instance. + // + // +optional + // +oneOf=deviceType + optional BasicDevice basic = 2; +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +message DeviceAllocationConfiguration { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim. + // + // +required + optional string source = 1; + + // Requests lists the names of requests where the configuration applies. + // If empty, its applies to all requests. + // + // +optional + // +listType=atomic + repeated string requests = 2; + + optional DeviceConfiguration deviceConfiguration = 3; +} + +// DeviceAllocationResult is the result of allocating devices. +message DeviceAllocationResult { + // Results lists all allocated devices. + // + // +optional + // +listType=atomic + repeated DeviceRequestAllocationResult results = 1; + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + repeated DeviceAllocationConfiguration config = 2; +} + +// DeviceAttribute must have exactly one field set. +message DeviceAttribute { + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + optional int64 int = 2; + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + optional bool bool = 3; + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string string = 4; + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string version = 5; +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +message DeviceClaim { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + repeated DeviceRequest requests = 1; + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. + // + // +optional + // +listType=atomic + repeated DeviceConstraint constraints = 2; + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. + // + // +optional + // +listType=atomic + repeated DeviceClaimConfiguration config = 3; +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +message DeviceClaimConfiguration { + // Requests lists the names of requests where the configuration applies. 
+ // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + optional DeviceConfiguration deviceConfiguration = 2; +} + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message DeviceClass { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + optional DeviceClassSpec spec = 2; +} + +// DeviceClassConfiguration is used in DeviceClass. +message DeviceClassConfiguration { + optional DeviceConfiguration deviceConfiguration = 1; +} + +// DeviceClassList is a collection of classes. +message DeviceClassList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource classes. + repeated DeviceClass items = 2; +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +message DeviceClassSpec { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 1; + + // Config defines configuration parameters that apply to each device that is claimed via this class. + // Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + repeated DeviceClassConfiguration config = 2; + + // Only nodes matching the selector will be considered by the scheduler + // when trying to find a Node that fits a Pod when that Pod uses + // a claim that has not been allocated yet *and* that claim + // gets allocated through a control plane controller. It is ignored + // when the claim does not use a control plane controller + // for allocation. + // + // Setting this field is optional. If unset, all Nodes are candidates. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional .k8s.io.api.core.v1.NodeSelector suitableNodes = 3; +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +message DeviceConfiguration { + // Opaque provides driver-specific configuration parameters. + // + // +optional + // +oneOf=ConfigurationType + optional OpaqueDeviceConfiguration opaque = 1; +} + +// DeviceConstraint must have exactly one field set besides Requests. +message DeviceConstraint { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. 
If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + optional string matchAttribute = 2; +} + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. It's absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. +message DeviceRequest { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + optional string name = 1; + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + optional string deviceClassName = 2; + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 3; + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AlloctionMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. + // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + optional string allocationMode = 4; + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. 
+ // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + optional int64 count = 5; + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // +optional + // +default=false + optional bool adminAccess = 6; +} + +// DeviceRequestAllocationResult contains the allocation result for one request. +message DeviceRequestAllocationResult { + // Request is the name of the request in the claim which caused this + // device to be allocated. Multiple devices may have been allocated + // per request. + // + // +required + optional string request = 1; + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 2; + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + optional string pool = 3; + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + optional string device = 4; +} + +// DeviceSelector must have exactly one field set. +message DeviceSelector { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + optional CELDeviceSelector cel = 1; +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +message OpaqueDeviceConfiguration { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 1; + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // +required + optional .k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2; +} + +// PodSchedulingContext objects hold information that is needed to schedule +// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation +// mode. +// +// This is an alpha type and requires enabling the DRAControlPlaneController +// feature gate. +message PodSchedulingContext { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes where resources for the Pod are needed. + optional PodSchedulingContextSpec spec = 2; + + // Status describes where resources for the Pod can be allocated. 
+ // + // +optional + optional PodSchedulingContextStatus status = 3; +} + +// PodSchedulingContextList is a collection of Pod scheduling objects. +message PodSchedulingContextList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of PodSchedulingContext objects. + repeated PodSchedulingContext items = 2; +} + +// PodSchedulingContextSpec describes where resources for the Pod are needed. +message PodSchedulingContextSpec { + // SelectedNode is the node for which allocation of ResourceClaims that + // are referenced by the Pod and that use "WaitForFirstConsumer" + // allocation is to be attempted. + // + // +optional + optional string selectedNode = 1; + + // PotentialNodes lists nodes where the Pod might be able to run. + // + // The size of this field is limited to 128. This is large enough for + // many clusters. Larger clusters may need more attempts to find a node + // that suits all pending resources. This may get increased in the + // future, but not reduced. + // + // +optional + // +listType=atomic + repeated string potentialNodes = 2; +} + +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +message PodSchedulingContextStatus { + // ResourceClaims describes resource availability for each + // pod.spec.resourceClaim entry where the corresponding ResourceClaim + // uses "WaitForFirstConsumer" allocation mode. + // + // +listType=map + // +listMapKey=name + // +optional + repeated ResourceClaimSchedulingStatus resourceClaims = 1; +} + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaim { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes what is being requested and how to configure it. + // The spec is immutable. + optional ResourceClaimSpec spec = 2; + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + optional ResourceClaimStatus status = 3; +} + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +message ResourceClaimConsumerReference { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + optional string apiGroup = 1; + + // Resource is the type of resource being referenced, for example "pods". + // +required + optional string resource = 3; + + // Name is the name of resource being referenced. + // +required + optional string name = 4; + + // UID identifies exactly one incarnation of the resource. + // +required + optional string uid = 5; +} + +// ResourceClaimList is a collection of claims. +message ResourceClaimList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claims. 
+ repeated ResourceClaim items = 2; +} + +// ResourceClaimSchedulingStatus contains information about one particular +// ResourceClaim with "WaitForFirstConsumer" allocation mode. +message ResourceClaimSchedulingStatus { + // Name matches the pod.spec.resourceClaims[*].Name field. + // + // +required + optional string name = 1; + + // UnsuitableNodes lists nodes that the ResourceClaim cannot be + // allocated for. + // + // The size of this field is limited to 128, the same as for + // PodSchedulingSpec.PotentialNodes. This may get increased in the + // future, but not reduced. + // + // +optional + // +listType=atomic + repeated string unsuitableNodes = 2; +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +message ResourceClaimSpec { + // Devices defines how to request devices. + // + // +optional + optional DeviceClaim devices = 1; + + // Controller is the name of the DRA driver that is meant + // to handle allocation of this claim. If empty, allocation is handled + // by the scheduler while scheduling a pod. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional string controller = 2; +} + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. +message ResourceClaimStatus { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + optional AllocationResult allocation = 1; + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. + // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 32 such reservations. This may get increased in + // the future, but not reduced. + // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + repeated ResourceClaimConsumerReference reservedFor = 2; + + // Indicates that a claim is to be deallocated. While this is set, + // no new consumers may be added to ReservedFor. + // + // This is only used if the claim needs to be deallocated by a DRA driver. + // That driver then must deallocate this claim and reset the field + // together with clearing the Allocation field. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional bool deallocationRequested = 3; +} + +// ResourceClaimTemplate is used to produce ResourceClaim objects. 
+// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaimTemplate { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + optional ResourceClaimTemplateSpec spec = 2; +} + +// ResourceClaimTemplateList is a collection of claim templates. +message ResourceClaimTemplateList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claim templates. + repeated ResourceClaimTemplate items = 2; +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +message ResourceClaimTemplateSpec { + // ObjectMeta may contain labels and annotations that will be copied into the PVC + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + optional ResourceClaimSpec spec = 2; +} + +// ResourcePool describes the pool that ResourceSlices belong to. +message ResourcePool { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + optional string name = 1; + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. + // + // +required + optional int64 generation = 2; + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. + // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. + // + // +required + optional int64 resourceSliceCount = 3; +} + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. +// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple , , . 
+// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceSlice { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + optional ResourceSliceSpec spec = 2; +} + +// ResourceSliceList is a collection of ResourceSlices. +message ResourceSliceList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource ResourceSlices. + repeated ResourceSlice items = 2; +} + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +message ResourceSliceSpec { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable. + // + // +required + optional string driver = 1; + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + optional ResourcePool pool = 2; + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. + // + // +optional + // +oneOf=NodeSelection + optional string nodeName = 3; + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 4; + + // AllNodes indicates that all nodes have access to the resources in the pool. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional bool allNodes = 5; + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. 
+ // + // +optional + // +listType=atomic + repeated Device devices = 6; +} + diff --git a/vendor/k8s.io/api/resource/v1alpha2/register.go b/vendor/k8s.io/api/resource/v1alpha3/register.go similarity index 86% rename from vendor/k8s.io/api/resource/v1alpha2/register.go rename to vendor/k8s.io/api/resource/v1alpha3/register.go index 893fb4c1e5..74044e8cf0 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/register.go +++ b/vendor/k8s.io/api/resource/v1alpha3/register.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha2 +package v1alpha3 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const GroupName = "resource.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha3"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -44,8 +44,8 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &ResourceClass{}, - &ResourceClassList{}, + &DeviceClass{}, + &DeviceClassList{}, &ResourceClaim{}, &ResourceClaimList{}, &ResourceClaimTemplate{}, @@ -54,15 +54,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &PodSchedulingContextList{}, &ResourceSlice{}, &ResourceSliceList{}, - &ResourceClaimParameters{}, - &ResourceClaimParametersList{}, - &ResourceClassParameters{}, - &ResourceClassParametersList{}, ) - // Add common types - scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) - // Add the watch version that applies metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go new file mode 100644 index 0000000000..4efd2491de --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -0,0 +1,1048 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation" +) + +const ( + // Finalizer is the finalizer that gets set for claims + // which were allocated through a builtin controller. + // Reserved for use by Kubernetes, DRA driver controllers must + // use their own finalizer. + Finalizer = "resource.kubernetes.io/delete-protection" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. 
A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. +// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple , , . +// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + Spec ResourceSliceSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +const ( + // ResourceSliceSelectorNodeName can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.NodeName]. + ResourceSliceSelectorNodeName = "spec.nodeName" + // ResourceSliceSelectorDriver can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.Driver]. + ResourceSliceSelectorDriver = "spec.driver" +) + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +type ResourceSliceSpec struct { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + Pool ResourcePool `json:"pool" protobuf:"bytes,2,name=pool"` + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. 
+ // + // +optional + // +oneOf=NodeSelection + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,3,opt,name=nodeName"` + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,4,opt,name=nodeSelector"` + + // AllNodes indicates that all nodes have access to the resources in the pool. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + AllNodes bool `json:"allNodes,omitempty" protobuf:"bytes,5,opt,name=allNodes"` + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. + // + // +optional + // +listType=atomic + Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"` +} + +// ResourcePool describes the pool that ResourceSlices belong to. +type ResourcePool struct { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. + // + // +required + Generation int64 `json:"generation" protobuf:"bytes,2,name=generation"` + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. + // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. + // + // +required + ResourceSliceCount int64 `json:"resourceSliceCount" protobuf:"bytes,3,name=resourceSliceCount"` +} + +const ResourceSliceMaxSharedCapacity = 128 +const ResourceSliceMaxDevices = 128 +const PoolNameMaxLength = validation.DNS1123SubdomainMaxLength // Same as for a single node name. + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +type Device struct { + // Name is unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Basic defines one device instance. + // + // +optional + // +oneOf=deviceType + Basic *BasicDevice `json:"basic,omitempty" protobuf:"bytes,2,opt,name=basic"` +} + +// BasicDevice defines one device instance. +type BasicDevice struct { + // Attributes defines the set of attributes for this device. 
+ // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + Attributes map[QualifiedName]DeviceAttribute `json:"attributes,omitempty" protobuf:"bytes,1,rep,name=attributes"` + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + Capacity map[QualifiedName]resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity"` +} + +// Limit for the sum of the number of entries in both ResourceSlices. +const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32 + +// QualifiedName is the name of a device attribute or capacity. +// +// Attributes and capacities are defined either by the owner of the specific +// driver (usually the vendor) or by some 3rd party (e.g. the Kubernetes +// project). Because they are sometimes compared across devices, a given name +// is expected to mean the same thing and have the same type on all devices. +// +// Names must be either a C identifier (e.g. "theName") or a DNS subdomain +// followed by a slash ("/") followed by a C identifier +// (e.g. "dra.example.com/theName"). Names which do not include the +// domain prefix are assumed to be part of the driver's domain. Attributes +// or capacities defined by 3rd parties must include the domain prefix. +// +// The maximum length for the DNS subdomain is 63 characters (same as +// for driver names) and the maximum length of the C identifier +// is 32. +type QualifiedName string + +// FullyQualifiedName is a QualifiedName where the domain is set. +type FullyQualifiedName string + +// DeviceMaxIDLength is the maximum length of the identifier in a device attribute or capacity name (`/`). +const DeviceMaxIDLength = 32 + +// DeviceAttribute must have exactly one field set. +type DeviceAttribute struct { + // The Go field names below have a Value suffix to avoid a conflict between the + // field "String" and the corresponding method. That method is required. + // The Kubernetes API is defined without that suffix to keep it more natural. + + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + IntValue *int64 `json:"int,omitempty" protobuf:"varint,2,opt,name=int"` + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + BoolValue *bool `json:"bool,omitempty" protobuf:"varint,3,opt,name=bool"` + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + StringValue *string `json:"string,omitempty" protobuf:"bytes,4,opt,name=string"` + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + VersionValue *string `json:"version,omitempty" protobuf:"bytes,5,opt,name=version"` +} + +// DeviceAttributeMaxValueLength is the maximum length of a string or version attribute value. +const DeviceAttributeMaxValueLength = 64 + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ResourceSliceList is a collection of ResourceSlices. +type ResourceSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource ResourceSlices. 
+ Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes what is being requested and how to configure it. + // The spec is immutable. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +type ResourceClaimSpec struct { + // Devices defines how to request devices. + // + // +optional + Devices DeviceClaim `json:"devices" protobuf:"bytes,1,name=devices"` + + // Controller is the name of the DRA driver that is meant + // to handle allocation of this claim. If empty, allocation is handled + // by the scheduler while scheduling a pod. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"` +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +type DeviceClaim struct { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + Requests []DeviceRequest `json:"requests" protobuf:"bytes,1,name=requests"` + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. + // + // +optional + // +listType=atomic + Constraints []DeviceConstraint `json:"constraints,omitempty" protobuf:"bytes,2,opt,name=constraints"` + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. + // + // +optional + // +listType=atomic + Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"` +} + +const ( + DeviceRequestsMaxSize = AllocationResultsMaxSize + DeviceConstraintsMaxSize = 32 + DeviceConfigMaxSize = 32 +) + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. It's absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. 
+type DeviceRequest struct { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + DeviceClassName string `json:"deviceClassName" protobuf:"bytes,2,name=deviceClassName"` + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"` + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AlloctionMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. + // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + AllocationMode DeviceAllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,4,opt,name=allocationMode"` + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. + // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + Count int64 `json:"count,omitempty" protobuf:"bytes,5,opt,name=count"` + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // +optional + // +default=false + AdminAccess bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"` +} + +const ( + DeviceSelectorsMaxSize = 32 +) + +type DeviceAllocationMode string + +// Valid [DeviceRequest.CountMode] values. +const ( + DeviceAllocationModeExactCount = DeviceAllocationMode("ExactCount") + DeviceAllocationModeAll = DeviceAllocationMode("All") +) + +// DeviceSelector must have exactly one field set. +type DeviceSelector struct { + // CEL contains a CEL expression for selecting a device. 
+ // + // +optional + // +oneOf=SelectorType + CEL *CELDeviceSelector `json:"cel,omitempty" protobuf:"bytes,1,opt,name=cel"` +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +type CELDeviceSelector struct { + // Expression is a CEL expression which evaluates a single device. It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // +required + Expression string `json:"expression" protobuf:"bytes,1,name=expression"` +} + +// DeviceConstraint must have exactly one field set besides Requests. +type DeviceConstraint struct { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. 
A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"` +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +type DeviceClaimConfiguration struct { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + DeviceConfiguration `json:",inline" protobuf:"bytes,2,name=deviceConfiguration"` +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +type DeviceConfiguration struct { + // Opaque provides driver-specific configuration parameters. + // + // +optional + // +oneOf=ConfigurationType + Opaque *OpaqueDeviceConfiguration `json:"opaque,omitempty" protobuf:"bytes,1,opt,name=opaque"` +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +type OpaqueDeviceConfiguration struct { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // +required + Parameters runtime.RawExtension `json:"parameters" protobuf:"bytes,2,name=parameters"` +} + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. +type ResourceClaimStatus struct { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,1,opt,name=allocation"` + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. + // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. 
The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 32 such reservations. This may get increased in + // the future, but not reduced. + // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"` + + // Indicates that a claim is to be deallocated. While this is set, + // no new consumers may be added to ReservedFor. + // + // This is only used if the claim needs to be deallocated by a DRA driver. + // That driver then must deallocate this claim and reset the field + // together with clearing the Allocation field. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"` +} + +// ReservedForMaxSize is the maximum number of entries in +// claim.status.reservedFor. +const ResourceClaimReservedForMaxSize = 32 + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +type ResourceClaimConsumerReference struct { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` + // Resource is the type of resource being referenced, for example "pods". + // +required + Resource string `json:"resource" protobuf:"bytes,3,name=resource"` + // Name is the name of resource being referenced. + // +required + Name string `json:"name" protobuf:"bytes,4,name=name"` + // UID identifies exactly one incarnation of the resource. + // +required + UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"` +} + +// AllocationResult contains attributes of an allocated resource. +type AllocationResult struct { + // Devices is the result of allocating devices. + // + // +optional + Devices DeviceAllocationResult `json:"devices,omitempty" protobuf:"bytes,1,opt,name=devices"` + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. + // + // +optional + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,3,opt,name=nodeSelector"` + + // Controller is the name of the DRA driver which handled the + // allocation. That driver is also responsible for deallocating the + // claim. It is empty when the claim can be deallocated without + // involving a driver. + // + // A driver may allocate devices provided by other drivers, so this + // driver name here can be different from the driver names listed for + // the results. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"` +} + +// DeviceAllocationResult is the result of allocating devices. 
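As a sketch of how a consumer such as a driver's kubelet plugin might walk the allocation result defined just below, the helper here pairs each allocated device with the opaque configuration entries that target its request (an empty Requests list meaning "all requests") and that were produced for the driver handling that device. The function and its matching strategy are illustrative only, not part of the API; it assumes the v1alpha3 package imported as resourceapi.

package deviceconfig // illustrative example, not part of the vendored API

import (
	resourceapi "k8s.io/api/resource/v1alpha3"
)

// applicableConfigs returns, per allocated device name, the opaque
// configuration entries whose request list covers that device's request
// and whose driver matches the driver that will handle the device.
func applicableConfigs(alloc resourceapi.DeviceAllocationResult) map[string][]resourceapi.OpaqueDeviceConfiguration {
	out := map[string][]resourceapi.OpaqueDeviceConfiguration{}
	for _, result := range alloc.Results {
		for _, cfg := range alloc.Config {
			// Opaque is a promoted field of the embedded DeviceConfiguration.
			if cfg.Opaque == nil || cfg.Opaque.Driver != result.Driver {
				continue
			}
			applies := len(cfg.Requests) == 0
			for _, req := range cfg.Requests {
				if req == result.Request {
					applies = true
					break
				}
			}
			if applies {
				out[result.Device] = append(out[result.Device], *cfg.Opaque)
			}
		}
	}
	return out
}

Filtering by driver name is one plausible policy; as the comments below note, drivers may also receive configuration they do not recognize and are expected to ignore it silently.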
+type DeviceAllocationResult struct { + // Results lists all allocated devices. + // + // +optional + // +listType=atomic + Results []DeviceRequestAllocationResult `json:"results,omitempty" protobuf:"bytes,1,opt,name=results"` + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + Config []DeviceAllocationConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` +} + +// AllocationResultsMaxSize represents the maximum number of +// entries in allocation.devices.results. +const AllocationResultsMaxSize = 32 + +// DeviceRequestAllocationResult contains the allocation result for one request. +type DeviceRequestAllocationResult struct { + // Request is the name of the request in the claim which caused this + // device to be allocated. Multiple devices may have been allocated + // per request. + // + // +required + Request string `json:"request" protobuf:"bytes,1,name=request"` + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,2,name=driver"` + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + Pool string `json:"pool" protobuf:"bytes,3,name=pool"` + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + Device string `json:"device" protobuf:"bytes,4,name=device"` +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +type DeviceAllocationConfiguration struct { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim. + // + // +required + Source AllocationConfigSource `json:"source" protobuf:"bytes,1,name=source"` + + // Requests lists the names of requests where the configuration applies. + // If empty, its applies to all requests. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,2,opt,name=requests"` + + DeviceConfiguration `json:",inline" protobuf:"bytes,3,name=deviceConfiguration"` +} + +type AllocationConfigSource string + +// Valid [DeviceAllocationConfiguration.Source] values. +const ( + AllocationConfigSourceClass = "FromClass" + AllocationConfigSourceClaim = "FromClaim" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaimList is a collection of claims. +type ResourceClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claims. 
+ Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// PodSchedulingContext objects hold information that is needed to schedule +// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation +// mode. +// +// This is an alpha type and requires enabling the DRAControlPlaneController +// feature gate. +type PodSchedulingContext struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes where resources for the Pod are needed. + Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Status describes where resources for the Pod can be allocated. + // + // +optional + Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodSchedulingContextSpec describes where resources for the Pod are needed. +type PodSchedulingContextSpec struct { + // SelectedNode is the node for which allocation of ResourceClaims that + // are referenced by the Pod and that use "WaitForFirstConsumer" + // allocation is to be attempted. + // + // +optional + SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"` + + // PotentialNodes lists nodes where the Pod might be able to run. + // + // The size of this field is limited to 128. This is large enough for + // many clusters. Larger clusters may need more attempts to find a node + // that suits all pending resources. This may get increased in the + // future, but not reduced. + // + // +optional + // +listType=atomic + PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` +} + +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +type PodSchedulingContextStatus struct { + // ResourceClaims describes resource availability for each + // pod.spec.resourceClaim entry where the corresponding ResourceClaim + // uses "WaitForFirstConsumer" allocation mode. + // + // +listType=map + // +listMapKey=name + // +optional + ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"` + + // If there ever is a need to support other kinds of resources + // than ResourceClaim, then new fields could get added here + // for those other resources. +} + +// ResourceClaimSchedulingStatus contains information about one particular +// ResourceClaim with "WaitForFirstConsumer" allocation mode. +type ResourceClaimSchedulingStatus struct { + // Name matches the pod.spec.resourceClaims[*].Name field. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // UnsuitableNodes lists nodes that the ResourceClaim cannot be + // allocated for. + // + // The size of this field is limited to 128, the same as for + // PodSchedulingSpec.PotentialNodes. This may get increased in the + // future, but not reduced. + // + // +optional + // +listType=atomic + UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` +} + +// PodSchedulingNodeListMaxSize defines the maximum number of entries in the +// node lists that are stored in PodSchedulingContext objects. This limit is part +// of the API. 
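A minimal sketch of how a control plane scheduler plugin might populate the spec above; the helper name newSchedulingContext and the silent truncation of extra candidates are illustrative assumptions, not part of this package.

package example

import (
	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newSchedulingContext builds a PodSchedulingContext for a pod whose claims
// are allocated via a control plane controller. Candidate node names beyond
// the API limit are simply dropped here for brevity.
func newSchedulingContext(podName, namespace string, candidates []string) *resourcev1alpha3.PodSchedulingContext {
	if len(candidates) > resourcev1alpha3.PodSchedulingNodeListMaxSize {
		candidates = candidates[:resourcev1alpha3.PodSchedulingNodeListMaxSize]
	}
	return &resourcev1alpha3.PodSchedulingContext{
		ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: namespace},
		Spec: resourcev1alpha3.PodSchedulingContextSpec{
			// SelectedNode stays empty until the scheduler has picked a node.
			PotentialNodes: candidates,
		},
	}
}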
+const PodSchedulingNodeListMaxSize = 128 + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// PodSchedulingContextList is a collection of Pod scheduling objects. +type PodSchedulingContextList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of PodSchedulingContext objects. + Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type DeviceClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + Spec DeviceClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +type DeviceClassSpec struct { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,1,opt,name=selectors"` + + // Config defines configuration parameters that apply to each device that is claimed via this class. + // Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` + + // Only nodes matching the selector will be considered by the scheduler + // when trying to find a Node that fits a Pod when that Pod uses + // a claim that has not been allocated yet *and* that claim + // gets allocated through a control plane controller. It is ignored + // when the claim does not use a control plane controller + // for allocation. + // + // Setting this field is optional. If unset, all Nodes are candidates. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"` +} + +// DeviceClassConfiguration is used in DeviceClass.
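A rough usage sketch combining DeviceClassSpec with the claim types defined earlier in this file; the driver name gpu.example.com, the object names, and the CEL expression are illustrative placeholders only.

package example

import (
	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleClass selects devices from a hypothetical "gpu.example.com" driver.
var exampleClass = resourcev1alpha3.DeviceClass{
	ObjectMeta: metav1.ObjectMeta{Name: "example-gpu"},
	Spec: resourcev1alpha3.DeviceClassSpec{
		Selectors: []resourcev1alpha3.DeviceSelector{{
			CEL: &resourcev1alpha3.CELDeviceSelector{
				// Matches only devices published by the hypothetical driver.
				Expression: `device.driver == "gpu.example.com"`,
			},
		}},
	},
}

// exampleClaim requests one device through that class; AllocationMode is left
// unset, so it defaults to ExactCount with a count of one.
var exampleClaim = resourcev1alpha3.ResourceClaim{
	ObjectMeta: metav1.ObjectMeta{Name: "gpu-claim", Namespace: "default"},
	Spec: resourcev1alpha3.ResourceClaimSpec{
		Devices: resourcev1alpha3.DeviceClaim{
			Requests: []resourcev1alpha3.DeviceRequest{{
				Name:            "gpu",
				DeviceClassName: "example-gpu",
			}},
		},
	},
}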
+type DeviceClassConfiguration struct { + DeviceConfiguration `json:",inline" protobuf:"bytes,1,opt,name=deviceConfiguration"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// DeviceClassList is a collection of classes. +type DeviceClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource classes. + Items []DeviceClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaimTemplate is used to produce ResourceClaim objects. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClaimTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +type ResourceClaimTemplateSpec struct { + // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaimTemplateList is a collection of claim templates. +type ResourceClaimTemplateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claim templates. + Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go new file mode 100644 index 0000000000..1a44a971db --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -0,0 +1,404 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1alpha3 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllocationResult = map[string]string{ + "": "AllocationResult contains attributes of an allocated resource.", + "devices": "Devices is the result of allocating devices.", + "nodeSelector": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.", + "controller": "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (AllocationResult) SwaggerDoc() map[string]string { + return map_AllocationResult +} + +var map_BasicDevice = map[string]string{ + "": "BasicDevice defines one device instance.", + "attributes": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", + "capacity": "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", +} + +func (BasicDevice) SwaggerDoc() map[string]string { + return map_BasicDevice +} + +var map_CELDeviceSelector = map[string]string{ + "": "CELDeviceSelector contains a CEL expression for selecting a device.", + "expression": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. 
you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)", +} + +func (CELDeviceSelector) SwaggerDoc() map[string]string { + return map_CELDeviceSelector +} + +var map_Device = map[string]string{ + "": "Device represents one individual hardware instance that can be selected based on its attributes. Besides the name, exactly one field must be set.", + "name": "Name is the unique identifier among all devices managed by the driver in the pool. It must be a DNS label.", + "basic": "Basic defines one device instance.", +} + +func (Device) SwaggerDoc() map[string]string { + return map_Device +} + +var map_DeviceAllocationConfiguration = map[string]string{ + "": "DeviceAllocationConfiguration gets embedded in an AllocationResult.", + "source": "Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, it applies to all requests.", +} + +func (DeviceAllocationConfiguration) SwaggerDoc() map[string]string { + return map_DeviceAllocationConfiguration +} + +var map_DeviceAllocationResult = map[string]string{ + "": "DeviceAllocationResult is the result of allocating devices.", + "results": "Results lists all allocated devices.", + "config": "This field is a combination of all the claim and class configuration parameters. Drivers can distinguish between those based on a flag.\n\nThis includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. They can silently ignore unknown configuration parameters.", +} + +func (DeviceAllocationResult) SwaggerDoc() map[string]string { + return map_DeviceAllocationResult +} + +var map_DeviceAttribute = map[string]string{ + "": "DeviceAttribute must have exactly one field set.", + "int": "IntValue is a number.", + "bool": "BoolValue is a true/false value.", + "string": "StringValue is a string. Must not be longer than 64 characters.", + "version": "VersionValue is a semantic version according to semver.org spec 2.0.0. Must not be longer than 64 characters.", +} + +func (DeviceAttribute) SwaggerDoc() map[string]string { + return map_DeviceAttribute +} + +var map_DeviceClaim = map[string]string{ + "": "DeviceClaim defines how to request devices with a ResourceClaim.", + "requests": "Requests represent individual requests for distinct devices which must all be satisfied.
If empty, nothing needs to be allocated.", + "constraints": "These constraints must be satisfied by the set of devices that get allocated for the claim.", + "config": "This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim.", +} + +func (DeviceClaim) SwaggerDoc() map[string]string { + return map_DeviceClaim +} + +var map_DeviceClaimConfiguration = map[string]string{ + "": "DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, it applies to all requests.", +} + +func (DeviceClaimConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClaimConfiguration +} + +var map_DeviceClass = map[string]string{ + "": "DeviceClass is a vendor- or admin-provided resource that contains device configuration and selectors. It can be referenced in the device requests of a claim to apply these presets. Cluster scoped.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec defines what can be allocated and how to configure it.\n\nThis is mutable. Consumers have to be prepared for classes changing at any time, either because they get updated or replaced. Claim allocations are done once based on whatever was set in classes at the time of allocation.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (DeviceClass) SwaggerDoc() map[string]string { + return map_DeviceClass +} + +var map_DeviceClassConfiguration = map[string]string{ + "": "DeviceClassConfiguration is used in DeviceClass.", +} + +func (DeviceClassConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClassConfiguration +} + +var map_DeviceClassList = map[string]string{ + "": "DeviceClassList is a collection of classes.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource classes.", +} + +func (DeviceClassList) SwaggerDoc() map[string]string { + return map_DeviceClassList +} + +var map_DeviceClassSpec = map[string]string{ + "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", + "selectors": "Each selector must be satisfied by a device which is claimed via this class.", + "config": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", + "suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (DeviceClassSpec) SwaggerDoc() map[string]string { + return map_DeviceClassSpec +} + +var map_DeviceConfiguration = map[string]string{ + "": "DeviceConfiguration must have exactly one field set.
It gets embedded inline in some other structs which have other fields, so field names must not conflict with those.", + "opaque": "Opaque provides driver-specific configuration parameters.", +} + +func (DeviceConfiguration) SwaggerDoc() map[string]string { + return map_DeviceConfiguration +} + +var map_DeviceConstraint = map[string]string{ + "": "DeviceConstraint must have exactly one field set besides Requests.", + "requests": "Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. If this is not specified, this constraint applies to all requests in this claim.", + "matchAttribute": "MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices.\n\nFor example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen.\n\nMust include the domain qualifier.", +} + +func (DeviceConstraint) SwaggerDoc() map[string]string { + return map_DeviceConstraint +} + +var map_DeviceRequest = map[string]string{ + "": "DeviceRequest is a request for devices required for a claim. This is typically a request for a single resource like a device, but can also ask for several identical devices.\n\nA DeviceClassName is currently required. Clients must check that it is indeed set. Its absence indicates that something changed in a way that is not supported by the client yet, in which case it must refuse to handle the request.", + "name": "Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim.\n\nMust be a DNS label.", + "deviceClassName": "DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request.\n\nA class is required. Which classes are available depends on the cluster.\n\nAdministrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.", + "selectors": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.", + "allocationMode": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. The exact number is provided in the\n count field.\n\n- All: This request is for all of the matching devices in a pool.\n Allocation will fail if some devices are already allocated,\n unless adminAccess is requested.\n\nIf AllocationMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.", + "count": "Count is used only when the count mode is \"ExactCount\".
Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.", + "adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.", +} + +func (DeviceRequest) SwaggerDoc() map[string]string { + return map_DeviceRequest +} + +var map_DeviceRequestAllocationResult = map[string]string{ + "": "DeviceRequestAllocationResult contains the allocation result for one request.", + "request": "Request is the name of the request in the claim which caused this device to be allocated. Multiple devices may have been allocated per request.", + "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "pool": "This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", + "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.", +} + +func (DeviceRequestAllocationResult) SwaggerDoc() map[string]string { + return map_DeviceRequestAllocationResult +} + +var map_DeviceSelector = map[string]string{ + "": "DeviceSelector must have exactly one field set.", + "cel": "CEL contains a CEL expression for selecting a device.", +} + +func (DeviceSelector) SwaggerDoc() map[string]string { + return map_DeviceSelector +} + +var map_OpaqueDeviceConfiguration = map[string]string{ + "": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.", + "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning.
Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.", +} + +func (OpaqueDeviceConfiguration) SwaggerDoc() map[string]string { + return map_OpaqueDeviceConfiguration +} + +var map_PodSchedulingContext = map[string]string{ + "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec describes where resources for the Pod are needed.", + "status": "Status describes where resources for the Pod can be allocated.", +} + +func (PodSchedulingContext) SwaggerDoc() map[string]string { + return map_PodSchedulingContext +} + +var map_PodSchedulingContextList = map[string]string{ + "": "PodSchedulingContextList is a collection of Pod scheduling objects.", + "metadata": "Standard list metadata", + "items": "Items is the list of PodSchedulingContext objects.", +} + +func (PodSchedulingContextList) SwaggerDoc() map[string]string { + return map_PodSchedulingContextList +} + +var map_PodSchedulingContextSpec = map[string]string{ + "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", + "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", + "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", +} + +func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { + return map_PodSchedulingContextSpec +} + +var map_PodSchedulingContextStatus = map[string]string{ + "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", + "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", +} + +func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { + return map_PodSchedulingContextStatus +} + +var map_ResourceClaim = map[string]string{ + "": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec describes what is being requested and how to configure it. The spec is immutable.", + "status": "Status describes whether the claim is ready to use and what has been allocated.", +} + +func (ResourceClaim) SwaggerDoc() map[string]string { + return map_ResourceClaim +} + +var map_ResourceClaimConsumerReference = map[string]string{ + "": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", + "apiGroup": "APIGroup is the group for the resource being referenced. 
It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "resource": "Resource is the type of resource being referenced, for example \"pods\".", + "name": "Name is the name of resource being referenced.", + "uid": "UID identifies exactly one incarnation of the resource.", +} + +func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string { + return map_ResourceClaimConsumerReference +} + +var map_ResourceClaimList = map[string]string{ + "": "ResourceClaimList is a collection of claims.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claims.", +} + +func (ResourceClaimList) SwaggerDoc() map[string]string { + return map_ResourceClaimList +} + +var map_ResourceClaimSchedulingStatus = map[string]string{ + "": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", + "name": "Name matches the pod.spec.resourceClaims[*].Name field.", + "unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", +} + +func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string { + return map_ResourceClaimSchedulingStatus +} + +var map_ResourceClaimSpec = map[string]string{ + "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", + "devices": "Devices defines how to request devices.", + "controller": "Controller is the name of the DRA driver that is meant to handle allocation of this claim. If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (ResourceClaimSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimSpec +} + +var map_ResourceClaimStatus = map[string]string{ + "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", + "allocation": "Allocation is set once the claim has been allocated successfully.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "deallocationRequested": "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. 
That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (ResourceClaimStatus) SwaggerDoc() map[string]string { + return map_ResourceClaimStatus +} + +var map_ResourceClaimTemplate = map[string]string{ + "": "ResourceClaimTemplate is used to produce ResourceClaim objects.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", +} + +func (ResourceClaimTemplate) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplate +} + +var map_ResourceClaimTemplateList = map[string]string{ + "": "ResourceClaimTemplateList is a collection of claim templates.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claim templates.", +} + +func (ResourceClaimTemplateList) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateList +} + +var map_ResourceClaimTemplateSpec = map[string]string{ + "": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", + "metadata": "ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim when creating it. No other fields are allowed and will be rejected during validation.", + "spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.", +} + +func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateSpec +} + +var map_ResourcePool = map[string]string{ + "": "ResourcePool describes the pool that ResourceSlices belong to.", + "name": "Name is used to identify the pool. For node-local devices, this is often the node name, but this is not required.\n\nIt must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. This field is immutable.", + "generation": "Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted.\n\nCombined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state.", + "resourceSliceCount": "ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero.\n\nConsumers can use this to check whether they have seen all ResourceSlices belonging to the same pool.", +} + +func (ResourcePool) SwaggerDoc() map[string]string { + return map_ResourcePool +} + +var map_ResourceSlice = map[string]string{ + "": "ResourceSlice represents one or more resources in a pool of similar resources, managed by a common driver.
A pool may span more than one ResourceSlice, and exactly how many ResourceSlices comprise a pool is determined by the driver.\n\nAt the moment, the only supported resources are devices with attributes and capacities. Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. The ResourceSlice in which a device gets published may change over time. The unique identifier for a device is the tuple <driver name>, <pool name>, <device name>.\n\nWhenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number and updates all ResourceSlices with that new number and new resource definitions. A consumer must only use ResourceSlices with the highest generation number and ignore all others.\n\nWhen allocating all resources in a pool matching certain criteria or when looking for the best solution among several different alternatives, a consumer should check the number of ResourceSlices in a pool (included in each ResourceSlice) to determine whether its view of a pool is complete and if not, should wait until the driver has completed updating the pool.\n\nFor resources that are not local to a node, the node name is not set. Instead, the driver may use a node selector to specify where the devices are available.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Contains the information published by the driver.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (ResourceSlice) SwaggerDoc() map[string]string { + return map_ResourceSlice +} + +var map_ResourceSliceList = map[string]string{ + "": "ResourceSliceList is a collection of ResourceSlices.", + "metadata": "Standard list metadata", + "items": "Items is the list of ResourceSlices.", +} + +func (ResourceSliceList) SwaggerDoc() map[string]string { + return map_ResourceSliceList +} + +var map_ResourceSliceSpec = map[string]string{ + "": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.", + "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.", + "pool": "Pool describes the pool that this ResourceSlice belongs to.", + "nodeName": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.
This field is immutable.", + "nodeSelector": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "allNodes": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.", +} + +func (ResourceSliceSpec) SwaggerDoc() map[string]string { + return map_ResourceSliceSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go similarity index 60% rename from vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go rename to vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go index 52de8e1ad5..58171df1f2 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go @@ -19,25 +19,20 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { *out = *in - if in.ResourceHandles != nil { - in, out := &in.ResourceHandles, &out.ResourceHandles - *out = make([]ResourceHandle, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AvailableOnNodes != nil { - in, out := &in.AvailableOnNodes, &out.AvailableOnNodes + in.Devices.DeepCopyInto(&out.Devices) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector *out = new(v1.NodeSelector) (*in).DeepCopyInto(*out) } @@ -55,134 +50,140 @@ func (in *AllocationResult) DeepCopy() *AllocationResult { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllocationResultModel) DeepCopyInto(out *AllocationResultModel) { +func (in *BasicDevice) DeepCopyInto(out *BasicDevice) { *out = *in - if in.NamedResources != nil { - in, out := &in.NamedResources, &out.NamedResources - *out = new(NamedResourcesAllocationResult) - **out = **in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[QualifiedName]DeviceAttribute, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(map[QualifiedName]resource.Quantity, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResultModel. -func (in *AllocationResultModel) DeepCopy() *AllocationResultModel { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicDevice. +func (in *BasicDevice) DeepCopy() *BasicDevice { if in == nil { return nil } - out := new(AllocationResultModel) + out := new(BasicDevice) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DriverAllocationResult) DeepCopyInto(out *DriverAllocationResult) { +func (in *CELDeviceSelector) DeepCopyInto(out *CELDeviceSelector) { *out = *in - in.VendorRequestParameters.DeepCopyInto(&out.VendorRequestParameters) - in.AllocationResultModel.DeepCopyInto(&out.AllocationResultModel) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverAllocationResult. -func (in *DriverAllocationResult) DeepCopy() *DriverAllocationResult { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CELDeviceSelector. +func (in *CELDeviceSelector) DeepCopy() *CELDeviceSelector { if in == nil { return nil } - out := new(DriverAllocationResult) + out := new(CELDeviceSelector) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DriverRequests) DeepCopyInto(out *DriverRequests) { +func (in *Device) DeepCopyInto(out *Device) { *out = *in - in.VendorParameters.DeepCopyInto(&out.VendorParameters) - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = make([]ResourceRequest, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = new(BasicDevice) + (*in).DeepCopyInto(*out) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverRequests. -func (in *DriverRequests) DeepCopy() *DriverRequests { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. +func (in *Device) DeepCopy() *Device { if in == nil { return nil } - out := new(DriverRequests) + out := new(Device) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesAllocationResult) DeepCopyInto(out *NamedResourcesAllocationResult) { +func (in *DeviceAllocationConfiguration) DeepCopyInto(out *DeviceAllocationConfiguration) { *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAllocationResult. -func (in *NamedResourcesAllocationResult) DeepCopy() *NamedResourcesAllocationResult { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationConfiguration. +func (in *DeviceAllocationConfiguration) DeepCopy() *DeviceAllocationConfiguration { if in == nil { return nil } - out := new(NamedResourcesAllocationResult) + out := new(DeviceAllocationConfiguration) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NamedResourcesAttribute) DeepCopyInto(out *NamedResourcesAttribute) { +func (in *DeviceAllocationResult) DeepCopyInto(out *DeviceAllocationResult) { *out = *in - in.NamedResourcesAttributeValue.DeepCopyInto(&out.NamedResourcesAttributeValue) + if in.Results != nil { + in, out := &in.Results, &out.Results + *out = make([]DeviceRequestAllocationResult, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceAllocationConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAttribute. -func (in *NamedResourcesAttribute) DeepCopy() *NamedResourcesAttribute { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationResult. +func (in *DeviceAllocationResult) DeepCopy() *DeviceAllocationResult { if in == nil { return nil } - out := new(NamedResourcesAttribute) + out := new(DeviceAllocationResult) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesAttributeValue) DeepCopyInto(out *NamedResourcesAttributeValue) { +func (in *DeviceAttribute) DeepCopyInto(out *DeviceAttribute) { *out = *in - if in.QuantityValue != nil { - in, out := &in.QuantityValue, &out.QuantityValue - x := (*in).DeepCopy() - *out = &x - } - if in.BoolValue != nil { - in, out := &in.BoolValue, &out.BoolValue - *out = new(bool) - **out = **in - } if in.IntValue != nil { in, out := &in.IntValue, &out.IntValue *out = new(int64) **out = **in } - if in.IntSliceValue != nil { - in, out := &in.IntSliceValue, &out.IntSliceValue - *out = new(NamedResourcesIntSlice) - (*in).DeepCopyInto(*out) + if in.BoolValue != nil { + in, out := &in.BoolValue, &out.BoolValue + *out = new(bool) + **out = **in } if in.StringValue != nil { in, out := &in.StringValue, &out.StringValue *out = new(string) **out = **in } - if in.StringSliceValue != nil { - in, out := &in.StringSliceValue, &out.StringSliceValue - *out = new(NamedResourcesStringSlice) - (*in).DeepCopyInto(*out) - } if in.VersionValue != nil { in, out := &in.VersionValue, &out.VersionValue *out = new(string) @@ -191,38 +192,127 @@ func (in *NamedResourcesAttributeValue) DeepCopyInto(out *NamedResourcesAttribut return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAttributeValue. -func (in *NamedResourcesAttributeValue) DeepCopy() *NamedResourcesAttributeValue { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAttribute. +func (in *DeviceAttribute) DeepCopy() *DeviceAttribute { if in == nil { return nil } - out := new(NamedResourcesAttributeValue) + out := new(DeviceAttribute) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NamedResourcesFilter) DeepCopyInto(out *NamedResourcesFilter) { +func (in *DeviceClaim) DeepCopyInto(out *DeviceClaim) { *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]DeviceRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Constraints != nil { + in, out := &in.Constraints, &out.Constraints + *out = make([]DeviceConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClaimConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesFilter. -func (in *NamedResourcesFilter) DeepCopy() *NamedResourcesFilter { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaim. +func (in *DeviceClaim) DeepCopy() *DeviceClaim { if in == nil { return nil } - out := new(NamedResourcesFilter) + out := new(DeviceClaim) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesInstance) DeepCopyInto(out *NamedResourcesInstance) { +func (in *DeviceClaimConfiguration) DeepCopyInto(out *DeviceClaimConfiguration) { *out = *in - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make([]NamedResourcesAttribute, len(*in)) + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaimConfiguration. +func (in *DeviceClaimConfiguration) DeepCopy() *DeviceClaimConfiguration { + if in == nil { + return nil + } + out := new(DeviceClaimConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClass) DeepCopyInto(out *DeviceClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClass. +func (in *DeviceClass) DeepCopy() *DeviceClass { + if in == nil { + return nil + } + out := new(DeviceClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassConfiguration) DeepCopyInto(out *DeviceClassConfiguration) { + *out = *in + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassConfiguration. 
+func (in *DeviceClassConfiguration) DeepCopy() *DeviceClassConfiguration { + if in == nil { + return nil + } + out := new(DeviceClassConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassList) DeepCopyInto(out *DeviceClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeviceClass, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -230,59 +320,112 @@ func (in *NamedResourcesInstance) DeepCopyInto(out *NamedResourcesInstance) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesInstance. -func (in *NamedResourcesInstance) DeepCopy() *NamedResourcesInstance { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassList. +func (in *DeviceClassList) DeepCopy() *DeviceClassList { if in == nil { return nil } - out := new(NamedResourcesInstance) + out := new(DeviceClassList) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesIntSlice) DeepCopyInto(out *NamedResourcesIntSlice) { +func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) { *out = *in - if in.Ints != nil { - in, out := &in.Ints, &out.Ints - *out = make([]int64, len(*in)) - copy(*out, *in) + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClassConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SuitableNodes != nil { + in, out := &in.SuitableNodes, &out.SuitableNodes + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassSpec. +func (in *DeviceClassSpec) DeepCopy() *DeviceClassSpec { + if in == nil { + return nil + } + out := new(DeviceClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConfiguration) DeepCopyInto(out *DeviceConfiguration) { + *out = *in + if in.Opaque != nil { + in, out := &in.Opaque, &out.Opaque + *out = new(OpaqueDeviceConfiguration) + (*in).DeepCopyInto(*out) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesIntSlice. -func (in *NamedResourcesIntSlice) DeepCopy() *NamedResourcesIntSlice { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfiguration. +func (in *DeviceConfiguration) DeepCopy() *DeviceConfiguration { if in == nil { return nil } - out := new(NamedResourcesIntSlice) + out := new(DeviceConfiguration) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NamedResourcesRequest) DeepCopyInto(out *NamedResourcesRequest) { +func (in *DeviceConstraint) DeepCopyInto(out *DeviceConstraint) { *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MatchAttribute != nil { + in, out := &in.MatchAttribute, &out.MatchAttribute + *out = new(FullyQualifiedName) + **out = **in + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesRequest. -func (in *NamedResourcesRequest) DeepCopy() *NamedResourcesRequest { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConstraint. +func (in *DeviceConstraint) DeepCopy() *DeviceConstraint { if in == nil { return nil } - out := new(NamedResourcesRequest) + out := new(DeviceConstraint) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesResources) DeepCopyInto(out *NamedResourcesResources) { +func (in *DeviceRequest) DeepCopyInto(out *DeviceRequest) { *out = *in - if in.Instances != nil { - in, out := &in.Instances, &out.Instances - *out = make([]NamedResourcesInstance, len(*in)) + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -290,33 +433,66 @@ func (in *NamedResourcesResources) DeepCopyInto(out *NamedResourcesResources) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesResources. -func (in *NamedResourcesResources) DeepCopy() *NamedResourcesResources { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequest. +func (in *DeviceRequest) DeepCopy() *DeviceRequest { if in == nil { return nil } - out := new(NamedResourcesResources) + out := new(DeviceRequest) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesStringSlice) DeepCopyInto(out *NamedResourcesStringSlice) { +func (in *DeviceRequestAllocationResult) DeepCopyInto(out *DeviceRequestAllocationResult) { *out = *in - if in.Strings != nil { - in, out := &in.Strings, &out.Strings - *out = make([]string, len(*in)) - copy(*out, *in) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequestAllocationResult. +func (in *DeviceRequestAllocationResult) DeepCopy() *DeviceRequestAllocationResult { + if in == nil { + return nil + } + out := new(DeviceRequestAllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceSelector) DeepCopyInto(out *DeviceSelector) { + *out = *in + if in.CEL != nil { + in, out := &in.CEL, &out.CEL + *out = new(CELDeviceSelector) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSelector. +func (in *DeviceSelector) DeepCopy() *DeviceSelector { + if in == nil { + return nil } + out := new(DeviceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) { + *out = *in + in.Parameters.DeepCopyInto(&out.Parameters) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesStringSlice. -func (in *NamedResourcesStringSlice) DeepCopy() *NamedResourcesStringSlice { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration. +func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration { if in == nil { return nil } - out := new(NamedResourcesStringSlice) + out := new(OpaqueDeviceConfiguration) in.DeepCopyInto(out) return out } @@ -503,93 +679,6 @@ func (in *ResourceClaimList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimParameters) DeepCopyInto(out *ResourceClaimParameters) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.GeneratedFrom != nil { - in, out := &in.GeneratedFrom, &out.GeneratedFrom - *out = new(ResourceClaimParametersReference) - **out = **in - } - if in.DriverRequests != nil { - in, out := &in.DriverRequests, &out.DriverRequests - *out = make([]DriverRequests, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParameters. -func (in *ResourceClaimParameters) DeepCopy() *ResourceClaimParameters { - if in == nil { - return nil - } - out := new(ResourceClaimParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClaimParameters) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimParametersList) DeepCopyInto(out *ResourceClaimParametersList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceClaimParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParametersList. -func (in *ResourceClaimParametersList) DeepCopy() *ResourceClaimParametersList { - if in == nil { - return nil - } - out := new(ResourceClaimParametersList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClaimParametersList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimParametersReference) DeepCopyInto(out *ResourceClaimParametersReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParametersReference. 
-func (in *ResourceClaimParametersReference) DeepCopy() *ResourceClaimParametersReference { - if in == nil { - return nil - } - out := new(ResourceClaimParametersReference) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) { *out = *in @@ -614,11 +703,7 @@ func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStat // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { *out = *in - if in.ParametersRef != nil { - in, out := &in.ParametersRef, &out.ParametersRef - *out = new(ResourceClaimParametersReference) - **out = **in - } + in.Devices.DeepCopyInto(&out.Devices) return } @@ -737,288 +822,17 @@ func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClass) DeepCopyInto(out *ResourceClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.ParametersRef != nil { - in, out := &in.ParametersRef, &out.ParametersRef - *out = new(ResourceClassParametersReference) - **out = **in - } - if in.SuitableNodes != nil { - in, out := &in.SuitableNodes, &out.SuitableNodes - *out = new(v1.NodeSelector) - (*in).DeepCopyInto(*out) - } - if in.StructuredParameters != nil { - in, out := &in.StructuredParameters, &out.StructuredParameters - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClass. -func (in *ResourceClass) DeepCopy() *ResourceClass { - if in == nil { - return nil - } - out := new(ResourceClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClassList) DeepCopyInto(out *ResourceClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassList. -func (in *ResourceClassList) DeepCopy() *ResourceClassList { - if in == nil { - return nil - } - out := new(ResourceClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceClassParameters) DeepCopyInto(out *ResourceClassParameters) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.GeneratedFrom != nil { - in, out := &in.GeneratedFrom, &out.GeneratedFrom - *out = new(ResourceClassParametersReference) - **out = **in - } - if in.VendorParameters != nil { - in, out := &in.VendorParameters, &out.VendorParameters - *out = make([]VendorParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Filters != nil { - in, out := &in.Filters, &out.Filters - *out = make([]ResourceFilter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParameters. -func (in *ResourceClassParameters) DeepCopy() *ResourceClassParameters { - if in == nil { - return nil - } - out := new(ResourceClassParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClassParameters) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClassParametersList) DeepCopyInto(out *ResourceClassParametersList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceClassParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParametersList. -func (in *ResourceClassParametersList) DeepCopy() *ResourceClassParametersList { - if in == nil { - return nil - } - out := new(ResourceClassParametersList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClassParametersList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClassParametersReference) DeepCopyInto(out *ResourceClassParametersReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParametersReference. -func (in *ResourceClassParametersReference) DeepCopy() *ResourceClassParametersReference { - if in == nil { - return nil - } - out := new(ResourceClassParametersReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceFilter) DeepCopyInto(out *ResourceFilter) { - *out = *in - in.ResourceFilterModel.DeepCopyInto(&out.ResourceFilterModel) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFilter. -func (in *ResourceFilter) DeepCopy() *ResourceFilter { - if in == nil { - return nil - } - out := new(ResourceFilter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceFilterModel) DeepCopyInto(out *ResourceFilterModel) { - *out = *in - if in.NamedResources != nil { - in, out := &in.NamedResources, &out.NamedResources - *out = new(NamedResourcesFilter) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFilterModel. -func (in *ResourceFilterModel) DeepCopy() *ResourceFilterModel { - if in == nil { - return nil - } - out := new(ResourceFilterModel) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceHandle) DeepCopyInto(out *ResourceHandle) { - *out = *in - if in.StructuredData != nil { - in, out := &in.StructuredData, &out.StructuredData - *out = new(StructuredResourceHandle) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHandle. -func (in *ResourceHandle) DeepCopy() *ResourceHandle { - if in == nil { - return nil - } - out := new(ResourceHandle) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceModel) DeepCopyInto(out *ResourceModel) { - *out = *in - if in.NamedResources != nil { - in, out := &in.NamedResources, &out.NamedResources - *out = new(NamedResourcesResources) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceModel. -func (in *ResourceModel) DeepCopy() *ResourceModel { - if in == nil { - return nil - } - out := new(ResourceModel) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceRequest) DeepCopyInto(out *ResourceRequest) { - *out = *in - in.VendorParameters.DeepCopyInto(&out.VendorParameters) - in.ResourceRequestModel.DeepCopyInto(&out.ResourceRequestModel) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequest. -func (in *ResourceRequest) DeepCopy() *ResourceRequest { - if in == nil { - return nil - } - out := new(ResourceRequest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceRequestModel) DeepCopyInto(out *ResourceRequestModel) { +func (in *ResourcePool) DeepCopyInto(out *ResourcePool) { *out = *in - if in.NamedResources != nil { - in, out := &in.NamedResources, &out.NamedResources - *out = new(NamedResourcesRequest) - **out = **in - } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequestModel. -func (in *ResourceRequestModel) DeepCopy() *ResourceRequestModel { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePool. 
+func (in *ResourcePool) DeepCopy() *ResourcePool { if in == nil { return nil } - out := new(ResourceRequestModel) + out := new(ResourcePool) in.DeepCopyInto(out) return out } @@ -1028,7 +842,7 @@ func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.ResourceModel.DeepCopyInto(&out.ResourceModel) + in.Spec.DeepCopyInto(&out.Spec) return } @@ -1084,13 +898,17 @@ func (in *ResourceSliceList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StructuredResourceHandle) DeepCopyInto(out *StructuredResourceHandle) { +func (in *ResourceSliceSpec) DeepCopyInto(out *ResourceSliceSpec) { *out = *in - in.VendorClassParameters.DeepCopyInto(&out.VendorClassParameters) - in.VendorClaimParameters.DeepCopyInto(&out.VendorClaimParameters) - if in.Results != nil { - in, out := &in.Results, &out.Results - *out = make([]DriverAllocationResult, len(*in)) + out.Pool = in.Pool + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]Device, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1098,29 +916,12 @@ func (in *StructuredResourceHandle) DeepCopyInto(out *StructuredResourceHandle) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StructuredResourceHandle. -func (in *StructuredResourceHandle) DeepCopy() *StructuredResourceHandle { - if in == nil { - return nil - } - out := new(StructuredResourceHandle) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VendorParameters) DeepCopyInto(out *VendorParameters) { - *out = *in - in.Parameters.DeepCopyInto(&out.Parameters) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VendorParameters. -func (in *VendorParameters) DeepCopy() *VendorParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceSpec. +func (in *ResourceSliceSpec) DeepCopy() *ResourceSliceSpec { if in == nil { return nil } - out := new(VendorParameters) + out := new(ResourceSliceSpec) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/api/scheduling/v1/doc.go b/vendor/k8s.io/api/scheduling/v1/doc.go index 76c4da002e..ee3c668471 100644 --- a/vendor/k8s.io/api/scheduling/v1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=scheduling.k8s.io package v1 // import "k8s.io/api/scheduling/v1" diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index c1a27e8baa..374e68238b 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -35,7 +35,7 @@ message PriorityClass { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. @@ -66,7 +66,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go index 146bae40d3..019dbcd00e 100644 --- a/vendor/k8s.io/api/scheduling/v1/types.go +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -24,6 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. @@ -59,6 +60,7 @@ type PriorityClass struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // PriorityClassList is a collection of priority classes. type PriorityClassList struct { diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..a4a432a64f --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityClass) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *PriorityClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index f0878fb16e..e42dccc688 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -36,7 +36,7 @@ message PriorityClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. @@ -67,7 +67,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 43878184d6..7f77b01753 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -36,7 +36,7 @@ message PriorityClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. @@ -67,7 +67,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index 75a6489da2..e2310dac23 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -18,5 +18,6 @@ limitations under the License. // +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/storage/v1" diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 06bbe3d5cf..ec2beac468 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -44,7 +44,7 @@ message CSIDriver { // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; @@ -55,7 +55,7 @@ message CSIDriverList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIDriver repeated CSIDriver items = 2; @@ -226,7 +226,7 @@ message CSIDriverSpec { message CSINode { // Standard object's metadata. // metadata.name must be the Kubernetes node name. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification of CSINode optional CSINodeSpec spec = 2; @@ -275,7 +275,7 @@ message CSINodeList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSINode repeated CSINode items = 2; @@ -327,7 +327,7 @@ message CSIStorageCapacity { // // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is @@ -336,7 +336,7 @@ message CSIStorageCapacity { // immutable. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass @@ -356,7 +356,7 @@ message CSIStorageCapacity { // unavailable. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the @@ -370,7 +370,7 @@ message CSIStorageCapacity { // API is ResourceRequirements.Requests in a volume claim. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; } // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. @@ -378,7 +378,7 @@ message CSIStorageCapacityList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIStorageCapacity objects. 
repeated CSIStorageCapacity items = 2; @@ -393,7 +393,7 @@ message StorageClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // provisioner indicates the type of the provisioner. optional string provisioner = 2; @@ -431,7 +431,7 @@ message StorageClass { // This field is only honored by servers that enable the VolumeScheduling feature. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; + repeated .k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; } // StorageClassList is a collection of storage classes. @@ -439,7 +439,7 @@ message StorageClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of StorageClasses repeated StorageClass items = 2; @@ -466,7 +466,7 @@ message VolumeAttachment { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. @@ -484,7 +484,7 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; @@ -506,7 +506,7 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is beta-level and is only // honored by servers that enabled the CSIMigration feature. // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -554,7 +554,7 @@ message VolumeAttachmentStatus { message VolumeError { // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index a94c7f44c5..de2bbc2e06 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -25,6 +25,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. 
@@ -79,6 +80,7 @@ type StorageClass struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // StorageClassList is a collection of storage classes. type StorageClassList struct { @@ -112,6 +114,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.13 // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. @@ -137,6 +140,7 @@ type VolumeAttachment struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.13 // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { @@ -227,6 +231,7 @@ type VolumeError struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 // CSIDriver captures information about a Container Storage Interface (CSI) // volume driver deployed on the cluster. @@ -251,6 +256,7 @@ type CSIDriver struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 // CSIDriverList is a collection of CSIDriver objects. type CSIDriverList struct { @@ -491,6 +497,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.17 // CSINode holds information about all CSI drivers installed on a node. // CSI drivers do not need to create the CSINode object directly. As long as @@ -572,6 +579,7 @@ type VolumeNodeResources struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.17 // CSINodeList is a collection of CSINode objects. type CSINodeList struct { @@ -588,6 +596,7 @@ type CSINodeList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.24 // CSIStorageCapacity stores the result of one CSI GetCapacity call. // For a given StorageClass, this describes the available capacity in a @@ -673,6 +682,7 @@ type CSIStorageCapacity struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.24 // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..a44c1181ad --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,82 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIDriver) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIDriverList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSINode) APILifecycleIntroduced() (major, minor int) { + return 1, 17 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSINodeList) APILifecycleIntroduced() (major, minor int) { + return 1, 17 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIStorageCapacity) APILifecycleIntroduced() (major, minor int) { + return 1, 24 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIStorageCapacityList) APILifecycleIntroduced() (major, minor int) { + return 1, 24 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageClass) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttachment) APILifecycleIntroduced() (major, minor int) { + return 1, 13 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *VolumeAttachmentList) APILifecycleIntroduced() (major, minor int) { + return 1, 13 +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 93aefd933a..380adbf66e 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -65,7 +65,7 @@ message CSIStorageCapacity { // // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is @@ -74,7 +74,7 @@ message CSIStorageCapacity { // immutable. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass @@ -94,7 +94,7 @@ message CSIStorageCapacity { // unavailable. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the @@ -108,7 +108,7 @@ message CSIStorageCapacity { // API is ResourceRequirements.Requests in a volume claim. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; } // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. @@ -116,7 +116,7 @@ message CSIStorageCapacityList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIStorageCapacity objects. repeated CSIStorageCapacity items = 2; @@ -130,7 +130,7 @@ message VolumeAttachment { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. @@ -148,7 +148,7 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; @@ -170,7 +170,7 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is alpha-level and is only // honored by servers that enabled the CSIMigration feature. 
// +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -221,7 +221,7 @@ message VolumeAttributesClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Name of the CSI driver // This field is immutable. @@ -248,7 +248,7 @@ message VolumeAttributesClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttributesClass objects. repeated VolumeAttributesClass items = 2; @@ -258,7 +258,7 @@ message VolumeAttributesClassList { message VolumeError { // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index c503ec6511..446a40c483 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -524,10 +524,66 @@ func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo +func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} } +func (*VolumeAttributesClass) ProtoMessage() {} +func (*VolumeAttributesClass) Descriptor() ([]byte, []int) { + return fileDescriptor_73e4f72503e71065, []int{17} +} +func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClass.Merge(m, src) +} +func (m *VolumeAttributesClass) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClass) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo + +func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} } +func (*VolumeAttributesClassList) ProtoMessage() {} +func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_73e4f72503e71065, []int{18} +} +func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClassList.Merge(m, src) +} +func (m 
*VolumeAttributesClassList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClassList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo + func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_73e4f72503e71065, []int{17} + return fileDescriptor_73e4f72503e71065, []int{19} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +611,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_73e4f72503e71065, []int{18} + return fileDescriptor_73e4f72503e71065, []int{20} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -600,6 +656,9 @@ func init() { proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClass.ParametersEntry") + proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClassList") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources") } @@ -609,111 +668,115 @@ func init() { } var fileDescriptor_73e4f72503e71065 = []byte{ - // 1655 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xcf, 0xc6, 0xce, 0xdb, 0x38, 0x69, 0x92, 0x49, 0xda, 0xbf, 0xff, 0x3e, 0xd8, 0x91, 0x11, - 0x34, 0xad, 0xca, 0xba, 0x0d, 0xa5, 0xaa, 0x2a, 0x55, 0x22, 0x9b, 0x04, 0xea, 0x36, 0x4e, 0xd3, - 0x71, 0x54, 0x55, 0x15, 0x07, 0xc6, 0xeb, 0x89, 0x33, 0x8d, 0xf7, 0xa5, 0x3b, 0xe3, 0x10, 0x73, - 0x82, 0x0b, 0x67, 0xc4, 0x81, 0x4f, 0xc0, 0x57, 0x00, 0x09, 0x2e, 0x1c, 0xa9, 0x84, 0x84, 0x2a, - 0x2e, 0xf4, 0x64, 0x51, 0xf3, 0x11, 0x90, 0x38, 0x44, 0x1c, 0xd0, 0xcc, 0x8e, 0xbd, 0x6f, 0x76, - 0x93, 0x70, 0xf0, 0xcd, 0xf3, 0xbc, 0xfc, 0x9e, 0x67, 0xe6, 0x79, 0x5d, 0x83, 0xab, 0x87, 0xb7, - 0x99, 0x4e, 0x9d, 0x12, 0x76, 0x69, 0x89, 0x71, 0xc7, 0xc3, 0x0d, 0x52, 0x3a, 0xba, 0x51, 0x23, - 0x1c, 0xdf, 0x28, 0x35, 0x88, 0x4d, 0x3c, 0xcc, 0x49, 0x5d, 0x77, 0x3d, 0x87, 0x3b, 0x30, 0xe7, - 0xcb, 0xea, 0xd8, 0xa5, 0xba, 0x92, 0xd5, 0x95, 0x6c, 0xee, 0xdd, 0x06, 0xe5, 0x07, 0xad, 0x9a, - 0x6e, 0x3a, 0x56, 0xa9, 0xe1, 0x34, 0x9c, 0x92, 0x54, 0xa9, 0xb5, 0xf6, 0xe5, 0x49, 0x1e, 0xe4, - 0x2f, 0x1f, 0x2a, 0x57, 0x0c, 0x99, 0x35, 0x1d, 0x4f, 0xd8, 0x8c, 0x9b, 0xcb, 0xdd, 0x0c, 0x64, - 0x2c, 0x6c, 0x1e, 0x50, 0x9b, 0x78, 0xed, 0x92, 0x7b, 0xd8, 0x90, 0x4a, 0x1e, 0x61, 0x4e, 0xcb, - 0x33, 0xc9, 0xb9, 0xb4, 0x58, 0xc9, 0x22, 0x1c, 0x0f, 0xb2, 0x55, 0x1a, 0xa6, 0xe5, 0xb5, 0x6c, - 0x4e, 0xad, 0xa4, 0x99, 0x5b, 0xa7, 0x29, 0x30, 0xf3, 0x80, 0x58, 0x38, 0xae, 0x57, 0xfc, 0x51, - 0x03, 0x33, 0x1b, 0xd5, 0xf2, 
0xa6, 0x47, 0x8f, 0x88, 0x07, 0x3f, 0x01, 0xd3, 0xc2, 0xa3, 0x3a, - 0xe6, 0x38, 0xab, 0xad, 0x68, 0xab, 0x99, 0xb5, 0xeb, 0x7a, 0xf0, 0xc8, 0x7d, 0x60, 0xdd, 0x3d, - 0x6c, 0x08, 0x02, 0xd3, 0x85, 0xb4, 0x7e, 0x74, 0x43, 0x7f, 0x58, 0x7b, 0x46, 0x4c, 0x5e, 0x21, - 0x1c, 0x1b, 0xf0, 0x45, 0xa7, 0x30, 0xd6, 0xed, 0x14, 0x40, 0x40, 0x43, 0x7d, 0x54, 0xf8, 0x00, - 0xa4, 0x99, 0x4b, 0xcc, 0xec, 0xb8, 0x44, 0xbf, 0xa2, 0x0f, 0x0f, 0xa1, 0xde, 0x77, 0xab, 0xea, - 0x12, 0xd3, 0x98, 0x55, 0xb0, 0x69, 0x71, 0x42, 0x12, 0xa4, 0xf8, 0x83, 0x06, 0xe6, 0xfa, 0x52, - 0xdb, 0x94, 0x71, 0xf8, 0x71, 0xe2, 0x02, 0xfa, 0xd9, 0x2e, 0x20, 0xb4, 0xa5, 0xfb, 0x0b, 0xca, - 0xce, 0x74, 0x8f, 0x12, 0x72, 0xfe, 0x3e, 0x98, 0xa0, 0x9c, 0x58, 0x2c, 0x3b, 0xbe, 0x92, 0x5a, - 0xcd, 0xac, 0xbd, 0x7d, 0x26, 0xef, 0x8d, 0x39, 0x85, 0x38, 0x51, 0x16, 0xba, 0xc8, 0x87, 0x28, - 0xfe, 0x9e, 0x0e, 0xf9, 0x2e, 0xee, 0x04, 0xef, 0x80, 0x0b, 0x98, 0x73, 0x6c, 0x1e, 0x20, 0xf2, - 0xbc, 0x45, 0x3d, 0x52, 0x97, 0x37, 0x98, 0x36, 0x60, 0xb7, 0x53, 0xb8, 0xb0, 0x1e, 0xe1, 0xa0, - 0x98, 0xa4, 0xd0, 0x75, 0x9d, 0x7a, 0xd9, 0xde, 0x77, 0x1e, 0xda, 0x15, 0xa7, 0x65, 0x73, 0xf9, - 0xc0, 0x4a, 0x77, 0x37, 0xc2, 0x41, 0x31, 0x49, 0x68, 0x82, 0xe5, 0x23, 0xa7, 0xd9, 0xb2, 0xc8, - 0x36, 0xdd, 0x27, 0x66, 0xdb, 0x6c, 0x92, 0x8a, 0x53, 0x27, 0x2c, 0x9b, 0x5a, 0x49, 0xad, 0xce, - 0x18, 0xa5, 0x6e, 0xa7, 0xb0, 0xfc, 0x78, 0x00, 0xff, 0xa4, 0x53, 0x58, 0x1a, 0x40, 0x47, 0x03, - 0xc1, 0xe0, 0x5d, 0x30, 0xaf, 0x5e, 0x68, 0x03, 0xbb, 0xd8, 0xa4, 0xbc, 0x9d, 0x4d, 0x4b, 0x0f, - 0x97, 0xba, 0x9d, 0xc2, 0x7c, 0x35, 0xca, 0x42, 0x71, 0x59, 0x78, 0x0f, 0xcc, 0xed, 0xb3, 0x8f, - 0x3c, 0xa7, 0xe5, 0xee, 0x3a, 0x4d, 0x6a, 0xb6, 0xb3, 0x13, 0x2b, 0xda, 0xea, 0x8c, 0x51, 0xec, - 0x76, 0x0a, 0x73, 0x1f, 0x56, 0x43, 0x8c, 0x93, 0x38, 0x01, 0x45, 0x15, 0x21, 0x01, 0x73, 0xdc, - 0x39, 0x24, 0xb6, 0x78, 0x3a, 0xc2, 0x38, 0xcb, 0x4e, 0xca, 0x58, 0xae, 0xbe, 0x29, 0x96, 0x7b, - 0x21, 0x05, 0xe3, 0xa2, 0x0a, 0xe7, 0x5c, 0x98, 0xca, 0x50, 0x14, 0x15, 0x6e, 0x80, 0x45, 0xcf, - 0x0f, 0x0e, 0x43, 0xc4, 0x6d, 0xd5, 0x9a, 0x94, 0x1d, 0x64, 0xa7, 0xe4, 0x8d, 0x2f, 0x76, 0x3b, - 0x85, 0x45, 0x14, 0x67, 0xa2, 0xa4, 0x3c, 0xbc, 0x09, 0x66, 0x19, 0xd9, 0xa6, 0x76, 0xeb, 0xd8, - 0x8f, 0xe9, 0xb4, 0xd4, 0x5f, 0xe8, 0x76, 0x0a, 0xb3, 0xd5, 0xad, 0x80, 0x8e, 0x22, 0x52, 0xc5, - 0xef, 0x35, 0x30, 0xb5, 0x51, 0x2d, 0xef, 0x38, 0x75, 0x32, 0x82, 0x82, 0x2e, 0x47, 0x0a, 0xfa, - 0xf2, 0x29, 0x25, 0x21, 0x9c, 0x1a, 0x5a, 0xce, 0x7f, 0xf9, 0xe5, 0x2c, 0x64, 0x54, 0x3f, 0x5a, - 0x01, 0x69, 0x1b, 0x5b, 0x44, 0xba, 0x3e, 0x13, 0xe8, 0xec, 0x60, 0x8b, 0x20, 0xc9, 0x81, 0xef, - 0x80, 0x49, 0xdb, 0xa9, 0x93, 0xf2, 0xa6, 0x74, 0x60, 0xc6, 0xb8, 0xa0, 0x64, 0x26, 0x77, 0x24, - 0x15, 0x29, 0xae, 0x78, 0x4a, 0xee, 0xb8, 0x4e, 0xd3, 0x69, 0xb4, 0x1f, 0x90, 0x76, 0x2f, 0xb9, - 0xe5, 0x53, 0xee, 0x85, 0xe8, 0x28, 0x22, 0x05, 0x6b, 0x20, 0x83, 0x9b, 0x4d, 0xc7, 0xc4, 0x1c, - 0xd7, 0x9a, 0x44, 0x66, 0x6c, 0x66, 0xad, 0xf4, 0xa6, 0x3b, 0xfa, 0x15, 0x21, 0x8c, 0x23, 0x35, - 0x11, 0x98, 0x31, 0xdf, 0xed, 0x14, 0x32, 0xeb, 0x01, 0x0e, 0x0a, 0x83, 0x16, 0xbf, 0xd3, 0x40, - 0x46, 0xdd, 0x7a, 0x04, 0x2d, 0xec, 0x5e, 0xb4, 0x85, 0xbd, 0x75, 0x86, 0x78, 0x0d, 0x69, 0x60, - 0x66, 0xdf, 0x6d, 0xd9, 0xbd, 0xf6, 0xc0, 0x54, 0x5d, 0x06, 0x8d, 0x65, 0x35, 0x09, 0x7d, 0xe5, - 0x0c, 0xd0, 0xaa, 0x43, 0xce, 0x2b, 0x03, 0x53, 0xfe, 0x99, 0xa1, 0x1e, 0x54, 0xf1, 0xef, 0x14, - 0x80, 0x1b, 0xd5, 0x72, 0xac, 0x3f, 0x8c, 0x20, 0xad, 0x29, 0x98, 0x15, 0x99, 0xd3, 0xcb, 0x0d, - 0x95, 0xde, 0xef, 0x9d, 0x31, 0x12, 0xb8, 0x46, 0x9a, 
0x55, 0xd2, 0x24, 0x26, 0x77, 0x3c, 0x3f, - 0xc9, 0x76, 0x42, 0x60, 0x28, 0x02, 0x0d, 0x37, 0xc1, 0x42, 0xaf, 0xdd, 0x35, 0x31, 0x63, 0x22, - 0xb9, 0xb3, 0x29, 0x99, 0xcc, 0x59, 0xe5, 0xe2, 0x42, 0x35, 0xc6, 0x47, 0x09, 0x0d, 0xf8, 0x04, - 0x4c, 0x9b, 0xe1, 0xce, 0x7a, 0x4a, 0xda, 0xe8, 0xbd, 0x85, 0x45, 0x7f, 0xd4, 0xc2, 0x36, 0xa7, - 0xbc, 0x6d, 0xcc, 0x8a, 0x94, 0xe9, 0xb7, 0xe0, 0x3e, 0x1a, 0x64, 0x60, 0xd1, 0xc2, 0xc7, 0xd4, - 0x6a, 0x59, 0x7e, 0x72, 0x57, 0xe9, 0x67, 0x44, 0xf6, 0xdf, 0xf3, 0x9b, 0x90, 0xad, 0xaf, 0x12, - 0x07, 0x43, 0x49, 0xfc, 0xe2, 0x2f, 0x1a, 0xb8, 0x94, 0x0c, 0xfc, 0x08, 0x0a, 0xa4, 0x1a, 0x2d, - 0x10, 0xfd, 0x94, 0x2c, 0x8e, 0x39, 0x38, 0xa4, 0x56, 0xbe, 0x9e, 0x04, 0xb3, 0xe1, 0x18, 0x8e, - 0x20, 0x81, 0xdf, 0x07, 0x19, 0xd7, 0x73, 0x8e, 0x28, 0xa3, 0x8e, 0x4d, 0x3c, 0xd5, 0x1d, 0x97, - 0x94, 0x4a, 0x66, 0x37, 0x60, 0xa1, 0xb0, 0x1c, 0x6c, 0x02, 0xe0, 0x62, 0x0f, 0x5b, 0x84, 0x8b, - 0x4a, 0x4e, 0xc9, 0x37, 0xb8, 0xfd, 0xa6, 0x37, 0x08, 0x5f, 0x4b, 0xdf, 0xed, 0xab, 0x6e, 0xd9, - 0xdc, 0x6b, 0x07, 0x2e, 0x06, 0x0c, 0x14, 0xc2, 0x87, 0x87, 0x60, 0xce, 0x23, 0x66, 0x13, 0x53, - 0x4b, 0x8d, 0xf5, 0xb4, 0x74, 0x73, 0x4b, 0x8c, 0x57, 0x14, 0x66, 0x9c, 0x74, 0x0a, 0xd7, 0x93, - 0x2b, 0xba, 0xbe, 0x4b, 0x3c, 0x46, 0x19, 0x27, 0x36, 0xf7, 0x53, 0x27, 0xa2, 0x83, 0xa2, 0xd8, - 0x62, 0x04, 0x58, 0x62, 0x40, 0x3e, 0x74, 0x39, 0x75, 0x6c, 0x96, 0x9d, 0x08, 0x46, 0x40, 0x25, - 0x44, 0x47, 0x11, 0x29, 0xb8, 0x0d, 0x96, 0x45, 0xb7, 0xfe, 0xd4, 0x37, 0xb0, 0x75, 0xec, 0x62, - 0x5b, 0x3c, 0x55, 0x76, 0x52, 0xce, 0xe2, 0xac, 0xd8, 0x8e, 0xd6, 0x07, 0xf0, 0xd1, 0x40, 0x2d, - 0xf8, 0x04, 0x2c, 0xfa, 0xeb, 0x91, 0x41, 0xed, 0x3a, 0xb5, 0x1b, 0x62, 0x39, 0x92, 0x6b, 0xc1, - 0x8c, 0x71, 0x55, 0xd4, 0xc6, 0xe3, 0x38, 0xf3, 0x64, 0x10, 0x11, 0x25, 0x41, 0xe0, 0x73, 0xb0, - 0x28, 0x2d, 0x92, 0xba, 0x6a, 0x2c, 0x94, 0xb0, 0xec, 0x74, 0x72, 0xb7, 0x11, 0x4f, 0x27, 0x12, - 0xa9, 0xd7, 0x7e, 0x7a, 0x6d, 0x6a, 0x8f, 0x78, 0x96, 0xf1, 0x7f, 0x15, 0xaf, 0xc5, 0xf5, 0x38, - 0x14, 0x4a, 0xa2, 0xe7, 0xee, 0x82, 0xf9, 0x58, 0xc0, 0xe1, 0x02, 0x48, 0x1d, 0x92, 0xb6, 0x3f, - 0xaf, 0x91, 0xf8, 0x09, 0x97, 0xc1, 0xc4, 0x11, 0x6e, 0xb6, 0x88, 0x9f, 0x81, 0xc8, 0x3f, 0xdc, - 0x19, 0xbf, 0xad, 0x15, 0x7f, 0xd2, 0x40, 0xa4, 0xb1, 0x8d, 0xa0, 0xb8, 0x2b, 0xd1, 0xe2, 0x5e, - 0x3d, 0x6b, 0x62, 0x0f, 0x29, 0xeb, 0x2f, 0x34, 0x30, 0x1b, 0xde, 0x02, 0xe1, 0x35, 0x30, 0x8d, - 0x5b, 0x75, 0x4a, 0x6c, 0xb3, 0xb7, 0xb3, 0xf4, 0xbd, 0x59, 0x57, 0x74, 0xd4, 0x97, 0x10, 0x3b, - 0x22, 0x39, 0x76, 0xa9, 0x87, 0x45, 0xa6, 0x55, 0x89, 0xe9, 0xd8, 0x75, 0x26, 0x9f, 0x29, 0xe5, - 0x37, 0xca, 0xad, 0x38, 0x13, 0x25, 0xe5, 0x8b, 0xdf, 0x8e, 0x83, 0x05, 0x3f, 0x41, 0xfc, 0x4f, - 0x04, 0x8b, 0xd8, 0x7c, 0x04, 0xed, 0x05, 0x45, 0xd6, 0xbe, 0xeb, 0xa7, 0xaf, 0x44, 0x81, 0x77, - 0xc3, 0xf6, 0x3f, 0xf8, 0x14, 0x4c, 0x32, 0x8e, 0x79, 0x8b, 0xc9, 0xf1, 0x97, 0x59, 0x5b, 0x3b, - 0x17, 0xaa, 0xd4, 0x0c, 0xf6, 0x3f, 0xff, 0x8c, 0x14, 0x62, 0xf1, 0x67, 0x0d, 0x2c, 0xc7, 0x55, - 0x46, 0x90, 0x70, 0x8f, 0xa2, 0x09, 0x77, 0xed, 0x3c, 0x37, 0x1a, 0x92, 0x74, 0xbf, 0x69, 0xe0, - 0x52, 0xe2, 0xf2, 0x72, 0xce, 0x8a, 0x5e, 0xe5, 0xc6, 0x3a, 0xe2, 0x4e, 0xb0, 0x3e, 0xcb, 0x5e, - 0xb5, 0x3b, 0x80, 0x8f, 0x06, 0x6a, 0xc1, 0x67, 0x60, 0x81, 0xda, 0x4d, 0x6a, 0x13, 0x35, 0x96, - 0x83, 0x70, 0x0f, 0x6c, 0x28, 0x71, 0x64, 0x19, 0xe6, 0x65, 0xb1, 0xbd, 0x94, 0x63, 0x28, 0x28, - 0x81, 0x5b, 0xfc, 0x75, 0x40, 0x78, 0xe4, 0x5a, 0x29, 0x2a, 0x4a, 0x52, 0x88, 0x97, 0xa8, 0x28, - 0x45, 0x47, 0x7d, 0x09, 0x99, 0x41, 0xf2, 0x29, 0x94, 0xa3, 0xe7, 0xcb, 0x20, 
0xa9, 0x19, 0xca, - 0x20, 0x79, 0x46, 0x0a, 0x51, 0x78, 0x22, 0xd6, 0xb6, 0xd0, 0x7a, 0xd6, 0xf7, 0x64, 0x47, 0xd1, - 0x51, 0x5f, 0xa2, 0xf8, 0x4f, 0x6a, 0x40, 0x94, 0x64, 0x2a, 0x86, 0xae, 0xd4, 0xfb, 0xc2, 0x8f, - 0x5f, 0xa9, 0xde, 0xbf, 0x52, 0x1d, 0x7e, 0xa3, 0x01, 0x88, 0xfb, 0x10, 0x95, 0x5e, 0xaa, 0xfa, - 0xf9, 0x74, 0xff, 0xfc, 0x15, 0xa2, 0xaf, 0x27, 0xc0, 0xfc, 0x59, 0x9d, 0x53, 0x4e, 0xc0, 0xa4, - 0x00, 0x1a, 0xe0, 0x01, 0xa4, 0x20, 0xe3, 0x53, 0xb7, 0x3c, 0xcf, 0xf1, 0x54, 0xc9, 0x5e, 0x3e, - 0xdd, 0x21, 0x29, 0x6e, 0xe4, 0xe5, 0x37, 0x51, 0xa0, 0x7f, 0xd2, 0x29, 0x64, 0x42, 0x7c, 0x14, - 0xc6, 0x16, 0xa6, 0xea, 0x24, 0x30, 0x95, 0xfe, 0x0f, 0xa6, 0x36, 0xc9, 0x70, 0x53, 0x21, 0xec, - 0xdc, 0x16, 0xf8, 0xdf, 0x90, 0x07, 0x3a, 0xd7, 0x6c, 0xfb, 0x52, 0x03, 0x61, 0x1b, 0x70, 0x1b, - 0xa4, 0x39, 0x55, 0x95, 0x98, 0x59, 0xbb, 0x7a, 0xb6, 0x0e, 0xb3, 0x47, 0x2d, 0x12, 0x34, 0x4a, - 0x71, 0x42, 0x12, 0x05, 0x5e, 0x01, 0x53, 0x16, 0x61, 0x0c, 0x37, 0x94, 0xe5, 0xe0, 0x03, 0xaa, - 0xe2, 0x93, 0x51, 0x8f, 0x5f, 0xbc, 0x05, 0x96, 0x06, 0x7c, 0x92, 0xc2, 0x02, 0x98, 0x30, 0xe5, - 0x5f, 0x0a, 0xc2, 0xa1, 0x09, 0x63, 0x46, 0x74, 0x99, 0x0d, 0xf9, 0x5f, 0x82, 0x4f, 0x37, 0x3e, - 0x78, 0xf1, 0x3a, 0x3f, 0xf6, 0xf2, 0x75, 0x7e, 0xec, 0xd5, 0xeb, 0xfc, 0xd8, 0xe7, 0xdd, 0xbc, - 0xf6, 0xa2, 0x9b, 0xd7, 0x5e, 0x76, 0xf3, 0xda, 0xab, 0x6e, 0x5e, 0xfb, 0xa3, 0x9b, 0xd7, 0xbe, - 0xfa, 0x33, 0x3f, 0xf6, 0x34, 0x37, 0xfc, 0xdf, 0xda, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x12, - 0x41, 0x18, 0xc9, 0xca, 0x15, 0x00, 0x00, + // 1728 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x23, 0x49, + 0x15, 0x4f, 0xc7, 0xce, 0x57, 0x39, 0x99, 0x24, 0x35, 0x99, 0xc5, 0xeb, 0x83, 0x1d, 0x19, 0xc1, + 0x66, 0x46, 0x4b, 0x7b, 0x12, 0x96, 0xd5, 0x68, 0xa5, 0x95, 0x48, 0x27, 0x81, 0xf5, 0x6e, 0x9c, + 0xc9, 0x96, 0xa3, 0xd1, 0x6a, 0xc5, 0x81, 0x72, 0xbb, 0xe2, 0xd4, 0xc6, 0xfd, 0xb1, 0x5d, 0xd5, + 0x21, 0xe6, 0x04, 0x17, 0xce, 0x88, 0x03, 0x7f, 0x01, 0xff, 0x02, 0x48, 0x70, 0xe1, 0xc8, 0x48, + 0x48, 0x68, 0xe1, 0xc2, 0x9e, 0x2c, 0xc6, 0xf3, 0x27, 0x20, 0x71, 0x88, 0x38, 0xa0, 0xaa, 0x2e, + 0xf7, 0xb7, 0x27, 0x36, 0x2b, 0xf9, 0xe6, 0x7a, 0x1f, 0xbf, 0x7a, 0x55, 0xef, 0xf7, 0x5e, 0xbd, + 0x36, 0x78, 0x72, 0xfd, 0x8c, 0xe9, 0xd4, 0x69, 0x60, 0x97, 0x36, 0x18, 0x77, 0x3c, 0xdc, 0x23, + 0x8d, 0x9b, 0xfd, 0x0e, 0xe1, 0x78, 0xbf, 0xd1, 0x23, 0x36, 0xf1, 0x30, 0x27, 0x5d, 0xdd, 0xf5, + 0x1c, 0xee, 0xc0, 0x4a, 0x60, 0xab, 0x63, 0x97, 0xea, 0xca, 0x56, 0x57, 0xb6, 0x95, 0xef, 0xf5, + 0x28, 0xbf, 0xf2, 0x3b, 0xba, 0xe9, 0x58, 0x8d, 0x9e, 0xd3, 0x73, 0x1a, 0xd2, 0xa5, 0xe3, 0x5f, + 0xca, 0x95, 0x5c, 0xc8, 0x5f, 0x01, 0x54, 0xa5, 0x1e, 0xdb, 0xd6, 0x74, 0x3c, 0xb1, 0x67, 0x7a, + 0xbb, 0xca, 0x7b, 0x91, 0x8d, 0x85, 0xcd, 0x2b, 0x6a, 0x13, 0x6f, 0xd0, 0x70, 0xaf, 0x7b, 0xd2, + 0xc9, 0x23, 0xcc, 0xf1, 0x3d, 0x93, 0xcc, 0xe4, 0xc5, 0x1a, 0x16, 0xe1, 0x38, 0x6f, 0xaf, 0xc6, + 0x24, 0x2f, 0xcf, 0xb7, 0x39, 0xb5, 0xb2, 0xdb, 0xbc, 0x7f, 0x9f, 0x03, 0x33, 0xaf, 0x88, 0x85, + 0xd3, 0x7e, 0xf5, 0x3f, 0x69, 0x60, 0xed, 0xa8, 0xdd, 0x3c, 0xf6, 0xe8, 0x0d, 0xf1, 0xe0, 0x4f, + 0xc1, 0xaa, 0x88, 0xa8, 0x8b, 0x39, 0x2e, 0x6b, 0xbb, 0xda, 0x5e, 0xe9, 0xe0, 0xa9, 0x1e, 0x5d, + 0x72, 0x08, 0xac, 0xbb, 0xd7, 0x3d, 0x21, 0x60, 0xba, 0xb0, 0xd6, 0x6f, 0xf6, 0xf5, 0xe7, 0x9d, + 0x2f, 0x88, 0xc9, 0x5b, 0x84, 0x63, 0x03, 0xbe, 0x1c, 0xd6, 0x16, 0x46, 0xc3, 0x1a, 0x88, 0x64, + 0x28, 0x44, 0x85, 0x9f, 0x80, 0x22, 0x73, 0x89, 0x59, 0x5e, 0x94, 0xe8, 0x8f, 0xf5, 0xc9, 0x29, + 0xd4, 0xc3, 
0xb0, 0xda, 0x2e, 0x31, 0x8d, 0x75, 0x05, 0x5b, 0x14, 0x2b, 0x24, 0x41, 0xea, 0x7f, + 0xd4, 0xc0, 0x46, 0x68, 0x75, 0x4a, 0x19, 0x87, 0x3f, 0xc9, 0x1c, 0x40, 0x9f, 0xee, 0x00, 0xc2, + 0x5b, 0x86, 0xbf, 0xa5, 0xf6, 0x59, 0x1d, 0x4b, 0x62, 0xc1, 0x7f, 0x0c, 0x96, 0x28, 0x27, 0x16, + 0x2b, 0x2f, 0xee, 0x16, 0xf6, 0x4a, 0x07, 0xdf, 0x99, 0x2a, 0x7a, 0x63, 0x43, 0x21, 0x2e, 0x35, + 0x85, 0x2f, 0x0a, 0x20, 0xea, 0xff, 0x2c, 0xc6, 0x62, 0x17, 0x67, 0x82, 0x1f, 0x80, 0x07, 0x98, + 0x73, 0x6c, 0x5e, 0x21, 0xf2, 0xa5, 0x4f, 0x3d, 0xd2, 0x95, 0x27, 0x58, 0x35, 0xe0, 0x68, 0x58, + 0x7b, 0x70, 0x98, 0xd0, 0xa0, 0x94, 0xa5, 0xf0, 0x75, 0x9d, 0x6e, 0xd3, 0xbe, 0x74, 0x9e, 0xdb, + 0x2d, 0xc7, 0xb7, 0xb9, 0xbc, 0x60, 0xe5, 0x7b, 0x9e, 0xd0, 0xa0, 0x94, 0x25, 0x34, 0xc1, 0xce, + 0x8d, 0xd3, 0xf7, 0x2d, 0x72, 0x4a, 0x2f, 0x89, 0x39, 0x30, 0xfb, 0xa4, 0xe5, 0x74, 0x09, 0x2b, + 0x17, 0x76, 0x0b, 0x7b, 0x6b, 0x46, 0x63, 0x34, 0xac, 0xed, 0xbc, 0xc8, 0xd1, 0xdf, 0x0d, 0x6b, + 0x0f, 0x73, 0xe4, 0x28, 0x17, 0x0c, 0x7e, 0x08, 0x36, 0xd5, 0x0d, 0x1d, 0x61, 0x17, 0x9b, 0x94, + 0x0f, 0xca, 0x45, 0x19, 0xe1, 0xc3, 0xd1, 0xb0, 0xb6, 0xd9, 0x4e, 0xaa, 0x50, 0xda, 0x16, 0x7e, + 0x04, 0x36, 0x2e, 0xd9, 0x8f, 0x3d, 0xc7, 0x77, 0xcf, 0x9d, 0x3e, 0x35, 0x07, 0xe5, 0xa5, 0x5d, + 0x6d, 0x6f, 0xcd, 0xa8, 0x8f, 0x86, 0xb5, 0x8d, 0x1f, 0xb5, 0x63, 0x8a, 0xbb, 0xb4, 0x00, 0x25, + 0x1d, 0x21, 0x01, 0x1b, 0xdc, 0xb9, 0x26, 0xb6, 0xb8, 0x3a, 0xc2, 0x38, 0x2b, 0x2f, 0xcb, 0x5c, + 0xee, 0xbd, 0x29, 0x97, 0x17, 0x31, 0x07, 0xe3, 0x91, 0x4a, 0xe7, 0x46, 0x5c, 0xca, 0x50, 0x12, + 0x15, 0x1e, 0x81, 0x6d, 0x2f, 0x48, 0x0e, 0x43, 0xc4, 0xf5, 0x3b, 0x7d, 0xca, 0xae, 0xca, 0x2b, + 0xf2, 0xc4, 0x8f, 0x46, 0xc3, 0xda, 0x36, 0x4a, 0x2b, 0x51, 0xd6, 0x1e, 0xbe, 0x07, 0xd6, 0x19, + 0x39, 0xa5, 0xb6, 0x7f, 0x1b, 0xe4, 0x74, 0x55, 0xfa, 0x6f, 0x8d, 0x86, 0xb5, 0xf5, 0xf6, 0x49, + 0x24, 0x47, 0x09, 0xab, 0xfa, 0x1f, 0x34, 0xb0, 0x72, 0xd4, 0x6e, 0x9e, 0x39, 0x5d, 0x32, 0x87, + 0x82, 0x6e, 0x26, 0x0a, 0xfa, 0x9d, 0x7b, 0x4a, 0x42, 0x04, 0x35, 0xb1, 0x9c, 0xff, 0x1d, 0x94, + 0xb3, 0xb0, 0x51, 0xfd, 0x68, 0x17, 0x14, 0x6d, 0x6c, 0x11, 0x19, 0xfa, 0x5a, 0xe4, 0x73, 0x86, + 0x2d, 0x82, 0xa4, 0x06, 0x7e, 0x17, 0x2c, 0xdb, 0x4e, 0x97, 0x34, 0x8f, 0x65, 0x00, 0x6b, 0xc6, + 0x03, 0x65, 0xb3, 0x7c, 0x26, 0xa5, 0x48, 0x69, 0xc5, 0x55, 0x72, 0xc7, 0x75, 0xfa, 0x4e, 0x6f, + 0xf0, 0x09, 0x19, 0x8c, 0xc9, 0x2d, 0xaf, 0xf2, 0x22, 0x26, 0x47, 0x09, 0x2b, 0xd8, 0x01, 0x25, + 0xdc, 0xef, 0x3b, 0x26, 0xe6, 0xb8, 0xd3, 0x27, 0x92, 0xb1, 0xa5, 0x83, 0xc6, 0x9b, 0xce, 0x18, + 0x54, 0x84, 0xd8, 0x1c, 0xa9, 0x17, 0x81, 0x19, 0x9b, 0xa3, 0x61, 0xad, 0x74, 0x18, 0xe1, 0xa0, + 0x38, 0x68, 0xfd, 0xf7, 0x1a, 0x28, 0xa9, 0x53, 0xcf, 0xa1, 0x85, 0x7d, 0x94, 0x6c, 0x61, 0xdf, + 0x9e, 0x22, 0x5f, 0x13, 0x1a, 0x98, 0x19, 0x86, 0x2d, 0xbb, 0xd7, 0x05, 0x58, 0xe9, 0xca, 0xa4, + 0xb1, 0xb2, 0x26, 0xa1, 0x1f, 0x4f, 0x01, 0xad, 0x3a, 0xe4, 0xa6, 0xda, 0x60, 0x25, 0x58, 0x33, + 0x34, 0x86, 0xaa, 0xff, 0xa7, 0x00, 0xe0, 0x51, 0xbb, 0x99, 0xea, 0x0f, 0x73, 0xa0, 0x35, 0x05, + 0xeb, 0x82, 0x39, 0x63, 0x6e, 0x28, 0x7a, 0x7f, 0x7f, 0xca, 0x4c, 0xe0, 0x0e, 0xe9, 0xb7, 0x49, + 0x9f, 0x98, 0xdc, 0xf1, 0x02, 0x92, 0x9d, 0xc5, 0xc0, 0x50, 0x02, 0x1a, 0x1e, 0x83, 0xad, 0x71, + 0xbb, 0xeb, 0x63, 0xc6, 0x04, 0xb9, 0xcb, 0x05, 0x49, 0xe6, 0xb2, 0x0a, 0x71, 0xab, 0x9d, 0xd2, + 0xa3, 0x8c, 0x07, 0xfc, 0x0c, 0xac, 0x9a, 0xf1, 0xce, 0x7a, 0x0f, 0x6d, 0xf4, 0xf1, 0xc0, 0xa2, + 0x7f, 0xea, 0x63, 0x9b, 0x53, 0x3e, 0x30, 0xd6, 0x05, 0x65, 0xc2, 0x16, 0x1c, 0xa2, 0x41, 0x06, + 0xb6, 0x2d, 0x7c, 0x4b, 0x2d, 0xdf, 
0x0a, 0xc8, 0xdd, 0xa6, 0x3f, 0x27, 0xb2, 0xff, 0xce, 0xbe, + 0x85, 0x6c, 0x7d, 0xad, 0x34, 0x18, 0xca, 0xe2, 0xd7, 0xff, 0xaa, 0x81, 0xb7, 0xb2, 0x89, 0x9f, + 0x43, 0x81, 0xb4, 0x93, 0x05, 0xa2, 0xdf, 0xc3, 0xe2, 0x54, 0x80, 0x13, 0x6a, 0xe5, 0x37, 0xcb, + 0x60, 0x3d, 0x9e, 0xc3, 0x39, 0x10, 0xf8, 0x07, 0xa0, 0xe4, 0x7a, 0xce, 0x0d, 0x65, 0xd4, 0xb1, + 0x89, 0xa7, 0xba, 0xe3, 0x43, 0xe5, 0x52, 0x3a, 0x8f, 0x54, 0x28, 0x6e, 0x07, 0xfb, 0x00, 0xb8, + 0xd8, 0xc3, 0x16, 0xe1, 0xa2, 0x92, 0x0b, 0xf2, 0x0e, 0x9e, 0xbd, 0xe9, 0x0e, 0xe2, 0xc7, 0xd2, + 0xcf, 0x43, 0xd7, 0x13, 0x9b, 0x7b, 0x83, 0x28, 0xc4, 0x48, 0x81, 0x62, 0xf8, 0xf0, 0x1a, 0x6c, + 0x78, 0xc4, 0xec, 0x63, 0x6a, 0xa9, 0x67, 0xbd, 0x28, 0xc3, 0x3c, 0x11, 0xcf, 0x2b, 0x8a, 0x2b, + 0xee, 0x86, 0xb5, 0xa7, 0xd9, 0x11, 0x5d, 0x3f, 0x27, 0x1e, 0xa3, 0x8c, 0x13, 0x9b, 0x07, 0xd4, + 0x49, 0xf8, 0xa0, 0x24, 0xb6, 0x78, 0x02, 0x2c, 0xf1, 0x40, 0x3e, 0x77, 0x39, 0x75, 0x6c, 0x56, + 0x5e, 0x8a, 0x9e, 0x80, 0x56, 0x4c, 0x8e, 0x12, 0x56, 0xf0, 0x14, 0xec, 0x88, 0x6e, 0xfd, 0xb3, + 0x60, 0x83, 0x93, 0x5b, 0x17, 0xdb, 0xe2, 0xaa, 0xca, 0xcb, 0xf2, 0x2d, 0x2e, 0x8b, 0xe9, 0xe8, + 0x30, 0x47, 0x8f, 0x72, 0xbd, 0xe0, 0x67, 0x60, 0x3b, 0x18, 0x8f, 0x0c, 0x6a, 0x77, 0xa9, 0xdd, + 0x13, 0xc3, 0x91, 0x1c, 0x0b, 0xd6, 0x8c, 0x27, 0xa2, 0x36, 0x5e, 0xa4, 0x95, 0x77, 0x79, 0x42, + 0x94, 0x05, 0x81, 0x5f, 0x82, 0x6d, 0xb9, 0x23, 0xe9, 0xaa, 0xc6, 0x42, 0x09, 0x2b, 0xaf, 0x66, + 0x67, 0x1b, 0x71, 0x75, 0x82, 0x48, 0xe3, 0xf6, 0x33, 0x6e, 0x53, 0x17, 0xc4, 0xb3, 0x8c, 0xb7, + 0x55, 0xbe, 0xb6, 0x0f, 0xd3, 0x50, 0x28, 0x8b, 0x5e, 0xf9, 0x10, 0x6c, 0xa6, 0x12, 0x0e, 0xb7, + 0x40, 0xe1, 0x9a, 0x0c, 0x82, 0xf7, 0x1a, 0x89, 0x9f, 0x70, 0x07, 0x2c, 0xdd, 0xe0, 0xbe, 0x4f, + 0x02, 0x06, 0xa2, 0x60, 0xf1, 0xc1, 0xe2, 0x33, 0xad, 0xfe, 0x67, 0x0d, 0x24, 0x1a, 0xdb, 0x1c, + 0x8a, 0xbb, 0x95, 0x2c, 0xee, 0xbd, 0x69, 0x89, 0x3d, 0xa1, 0xac, 0x7f, 0xa9, 0x81, 0xf5, 0xf8, + 0x14, 0x08, 0xdf, 0x05, 0xab, 0xd8, 0xef, 0x52, 0x62, 0x9b, 0xe3, 0x99, 0x25, 0x8c, 0xe6, 0x50, + 0xc9, 0x51, 0x68, 0x21, 0x66, 0x44, 0x72, 0xeb, 0x52, 0x0f, 0x0b, 0xa6, 0xb5, 0x89, 0xe9, 0xd8, + 0x5d, 0x26, 0xaf, 0xa9, 0x10, 0x34, 0xca, 0x93, 0xb4, 0x12, 0x65, 0xed, 0xeb, 0xbf, 0x5b, 0x04, + 0x5b, 0x01, 0x41, 0x82, 0x4f, 0x04, 0x8b, 0xd8, 0x7c, 0x0e, 0xed, 0x05, 0x25, 0xc6, 0xbe, 0xa7, + 0xf7, 0x8f, 0x44, 0x51, 0x74, 0x93, 0xe6, 0x3f, 0xf8, 0x39, 0x58, 0x66, 0x1c, 0x73, 0x9f, 0xc9, + 0xe7, 0xaf, 0x74, 0x70, 0x30, 0x13, 0xaa, 0xf4, 0x8c, 0xe6, 0xbf, 0x60, 0x8d, 0x14, 0x62, 0xfd, + 0x2f, 0x1a, 0xd8, 0x49, 0xbb, 0xcc, 0x81, 0x70, 0x9f, 0x26, 0x09, 0xf7, 0xee, 0x2c, 0x27, 0x9a, + 0x40, 0xba, 0x7f, 0x68, 0xe0, 0xad, 0xcc, 0xe1, 0xe5, 0x3b, 0x2b, 0x7a, 0x95, 0x9b, 0xea, 0x88, + 0x67, 0xd1, 0xf8, 0x2c, 0x7b, 0xd5, 0x79, 0x8e, 0x1e, 0xe5, 0x7a, 0xc1, 0x2f, 0xc0, 0x16, 0xb5, + 0xfb, 0xd4, 0x26, 0xea, 0x59, 0x8e, 0xd2, 0x9d, 0xdb, 0x50, 0xd2, 0xc8, 0x32, 0xcd, 0x3b, 0x62, + 0x7a, 0x69, 0xa6, 0x50, 0x50, 0x06, 0xb7, 0xfe, 0xb7, 0x9c, 0xf4, 0xc8, 0xb1, 0x52, 0x54, 0x94, + 0x94, 0x10, 0x2f, 0x53, 0x51, 0x4a, 0x8e, 0x42, 0x0b, 0xc9, 0x20, 0x79, 0x15, 0x2a, 0xd0, 0xd9, + 0x18, 0x24, 0x3d, 0x63, 0x0c, 0x92, 0x6b, 0xa4, 0x10, 0x45, 0x24, 0x62, 0x6c, 0x8b, 0x8d, 0x67, + 0x61, 0x24, 0x67, 0x4a, 0x8e, 0x42, 0x8b, 0xfa, 0x7f, 0x0b, 0x39, 0x59, 0x92, 0x54, 0x8c, 0x1d, + 0x69, 0xfc, 0x85, 0x9f, 0x3e, 0x52, 0x37, 0x3c, 0x52, 0x17, 0xfe, 0x56, 0x03, 0x10, 0x87, 0x10, + 0xad, 0x31, 0x55, 0x03, 0x3e, 0x7d, 0x3c, 0x7b, 0x85, 0xe8, 0x87, 0x19, 0xb0, 0xe0, 0xad, 0xae, + 0xa8, 0x20, 0x60, 0xd6, 0x00, 0xe5, 0x44, 0x00, 0x29, 0x28, 
0x05, 0xd2, 0x13, 0xcf, 0x73, 0x3c, + 0x55, 0xb2, 0xef, 0xdc, 0x1f, 0x90, 0x34, 0x37, 0xaa, 0xf2, 0x9b, 0x28, 0xf2, 0xbf, 0x1b, 0xd6, + 0x4a, 0x31, 0x3d, 0x8a, 0x63, 0x8b, 0xad, 0xba, 0x24, 0xda, 0xaa, 0xf8, 0x7f, 0x6c, 0x75, 0x4c, + 0x26, 0x6f, 0x15, 0xc3, 0xae, 0x9c, 0x80, 0x6f, 0x4d, 0xb8, 0xa0, 0x99, 0xde, 0xb6, 0xd7, 0x8b, + 0xe0, 0x51, 0x78, 0xff, 0x1e, 0xed, 0xf8, 0x9c, 0xb0, 0x79, 0x4d, 0x7e, 0x07, 0x00, 0x04, 0x9f, + 0x4f, 0x92, 0xaa, 0xc1, 0xe0, 0x17, 0x7a, 0x1c, 0x87, 0x1a, 0x14, 0xb3, 0x82, 0x7e, 0xce, 0xd8, + 0x77, 0x38, 0x15, 0xb9, 0xe2, 0x87, 0x9b, 0x75, 0xfe, 0xfb, 0xa6, 0x13, 0xc4, 0xdf, 0x35, 0xf0, + 0x76, 0x6e, 0x20, 0x73, 0xe8, 0xec, 0x2f, 0x92, 0x9d, 0x7d, 0x7f, 0xe6, 0xcb, 0x9a, 0xd0, 0xde, + 0x7f, 0xa5, 0x81, 0x38, 0x3b, 0xe1, 0x29, 0x28, 0x72, 0xaa, 0x7a, 0x78, 0xe9, 0xe0, 0xc9, 0x74, + 0x27, 0xb8, 0xa0, 0x16, 0x89, 0x9e, 0x58, 0xb1, 0x42, 0x12, 0x05, 0x3e, 0x06, 0x2b, 0x16, 0x61, + 0x0c, 0xf7, 0xc6, 0xc4, 0x08, 0x3f, 0xbd, 0x5b, 0x81, 0x18, 0x8d, 0xf5, 0xf5, 0xf7, 0xc1, 0xc3, + 0x9c, 0x3f, 0x33, 0x60, 0x0d, 0x2c, 0x99, 0xf2, 0xcf, 0x28, 0x11, 0xd0, 0x92, 0xb1, 0x26, 0x0e, + 0x70, 0x24, 0xff, 0x85, 0x0a, 0xe4, 0xc6, 0x0f, 0x5f, 0xbe, 0xaa, 0x2e, 0x7c, 0xf5, 0xaa, 0xba, + 0xf0, 0xf5, 0xab, 0xea, 0xc2, 0x2f, 0x46, 0x55, 0xed, 0xe5, 0xa8, 0xaa, 0x7d, 0x35, 0xaa, 0x6a, + 0x5f, 0x8f, 0xaa, 0xda, 0xbf, 0x46, 0x55, 0xed, 0xd7, 0xaf, 0xab, 0x0b, 0x9f, 0x57, 0x26, 0xff, + 0xcf, 0xff, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x30, 0xdb, 0x24, 0x04, 0x18, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -1664,6 +1727,115 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *VolumeAttributesClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttributesClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttributesClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *VolumeAttributesClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2086,6 +2258,44 @@ func (m *VolumeAttachmentStatus) Size() (n int) { return n } +func (m *VolumeAttributesClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *VolumeAttributesClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *VolumeError) Size() (n int) { if m == nil { return 0 @@ -2384,6 +2594,44 @@ func (this *VolumeAttachmentStatus) String() string { }, "") return s } +func (this *VolumeAttributesClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&VolumeAttributesClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttributesClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttributesClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttributesClass", "VolumeAttributesClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttributesClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *VolumeError) String() string { if this == nil { return "nil" @@ -5154,6 +5402,365 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } 
return nil } +func (m *VolumeAttributesClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttributesClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttributesClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index f6e619d05d..dfef3f6cc5 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -47,7 +47,7 @@ message CSIDriver { // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; @@ -58,7 +58,7 @@ message CSIDriverList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIDriver repeated CSIDriver items = 2; @@ -229,7 +229,7 @@ message CSIDriverSpec { // CSINode has an OwnerReference that points to the corresponding node object. message CSINode { // metadata.name must be the Kubernetes node name. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification of CSINode optional CSINodeSpec spec = 2; @@ -277,7 +277,7 @@ message CSINodeList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSINode repeated CSINode items = 2; @@ -329,7 +329,7 @@ message CSIStorageCapacity { // // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is @@ -338,7 +338,7 @@ message CSIStorageCapacity { // immutable. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; // storageClassName represents the name of the StorageClass that the reported capacity applies to. 
// It must meet the same requirements as the name of a StorageClass @@ -358,7 +358,7 @@ message CSIStorageCapacity { // unavailable. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the @@ -372,7 +372,7 @@ message CSIStorageCapacity { // API is ResourceRequirements.Requests in a volume claim. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; } // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. @@ -380,7 +380,7 @@ message CSIStorageCapacityList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIStorageCapacity objects. repeated CSIStorageCapacity items = 2; @@ -395,7 +395,7 @@ message StorageClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // provisioner indicates the type of the provisioner. optional string provisioner = 2; @@ -433,7 +433,7 @@ message StorageClass { // This field is only honored by servers that enable the VolumeScheduling feature. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; + repeated .k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; } // StorageClassList is a collection of storage classes. @@ -441,7 +441,7 @@ message StorageClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of StorageClasses repeated StorageClass items = 2; @@ -468,7 +468,7 @@ message VolumeAttachment { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. @@ -486,7 +486,7 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; @@ -508,7 +508,7 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is beta-level and is only // honored by servers that enabled the CSIMigration feature. 
// +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -552,11 +552,51 @@ message VolumeAttachmentStatus { optional VolumeError detachError = 4; } +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +message VolumeAttributesClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Name of the CSI driver + // This field is immutable. + optional string driverName = 2; + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + map parameters = 3; +} + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +message VolumeAttributesClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of VolumeAttributesClass objects. + repeated VolumeAttributesClass items = 2; +} + // VolumeError captures an error encountered during a volume operation. message VolumeError { // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // message represents the error encountered during Attach or Detach operation. 
// This string may be logged, so it should not contain sensitive diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go index a281d0f26e..e2214ef2f5 100644 --- a/vendor/k8s.io/api/storage/v1beta1/register.go +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -58,6 +58,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { &CSIStorageCapacity{}, &CSIStorageCapacityList{}, + + &VolumeAttributesClass{}, + &VolumeAttributesClassList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 9333a28b8d..ce294e3dba 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -714,3 +714,55 @@ type CSIStorageCapacityList struct { // items is the list of CSIStorageCapacity objects. Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +type VolumeAttributesClass struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Name of the CSI driver + // This field is immutable. + DriverName string `json:"driverName" protobuf:"bytes,2,opt,name=driverName"` + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +type VolumeAttributesClassList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of VolumeAttributesClass objects. 
+ Items []VolumeAttributesClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 6d9d233066..8c1a663507 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -216,6 +216,27 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { return map_VolumeAttachmentStatus } +var map_VolumeAttributesClass = map[string]string{ + "": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "driverName": "Name of the CSI driver This field is immutable.", + "parameters": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", +} + +func (VolumeAttributesClass) SwaggerDoc() map[string]string { + return map_VolumeAttributesClass +} + +var map_VolumeAttributesClassList = map[string]string{ + "": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of VolumeAttributesClass objects.", +} + +func (VolumeAttributesClassList) SwaggerDoc() map[string]string { + return map_VolumeAttributesClassList +} + var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "time represents the time the error was encountered.", diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index f0450182b2..d87aa6b90b 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -579,6 +579,72 @@ func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttributesClass) DeepCopyInto(out *VolumeAttributesClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClass. +func (in *VolumeAttributesClass) DeepCopy() *VolumeAttributesClass { + if in == nil { + return nil + } + out := new(VolumeAttributesClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttributesClassList) DeepCopyInto(out *VolumeAttributesClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttributesClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClassList. +func (in *VolumeAttributesClassList) DeepCopy() *VolumeAttributesClassList { + if in == nil { + return nil + } + out := new(VolumeAttributesClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeError) DeepCopyInto(out *VolumeError) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go index c5d23e7d45..4be57dc0d4 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go @@ -264,3 +264,39 @@ func (in *VolumeAttachmentList) APILifecycleReplacement() schema.GroupVersionKin func (in *VolumeAttachmentList) APILifecycleRemoved() (major, minor int) { return 1, 22 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClass) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *VolumeAttributesClass) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClass) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto index fc8a3346e2..341e0bc5cf 100644 --- a/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto @@ -51,7 +51,7 @@ message MigrationCondition { // The last time this condition was updated. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3; // The reason for the condition's last transition. // +optional @@ -68,7 +68,7 @@ message StorageVersionMigration { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the migration. 
// +optional @@ -84,7 +84,7 @@ message StorageVersionMigrationList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of StorageVersionMigration // +patchMergeKey=type diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index 8c4e147f0b..61efeae69c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -144,7 +144,6 @@ type JSONSchemaProps struct { XMapType *string // x-kubernetes-validations -kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. // +patchMergeKey=rule // +patchStrategy=merge // +listType=map diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go index 09d4872f8a..c7be07a143 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go @@ -19,6 +19,7 @@ limitations under the License. // +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=apiextensions.k8s.io // Package v1 is the v1 version of the API. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto index 2ad78822f8..1bbd0ce13e 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto @@ -41,7 +41,7 @@ message ConversionRequest { // objects is the list of custom resource objects to be converted. // +listType=atomic - repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; } // ConversionResponse describes a conversion response. @@ -55,14 +55,14 @@ message ConversionResponse { // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. // +listType=atomic - repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. 
The `result.message` // will be used to construct an error message for the end user. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; } // ConversionReview describes a conversion request/response. @@ -125,7 +125,7 @@ message CustomResourceDefinition { // Standard object's metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec describes how the user wants the resources to appear optional CustomResourceDefinitionSpec spec = 2; @@ -146,7 +146,7 @@ message CustomResourceDefinitionCondition { // lastTransitionTime last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is a unique, one-word, CamelCase reason for the condition's last transition. // +optional @@ -162,7 +162,7 @@ message CustomResourceDefinitionList { // Standard object's metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items list individual CustomResourceDefinition objects repeated CustomResourceDefinition items = 2; @@ -573,7 +573,6 @@ message JSONSchemaProps { optional string xKubernetesMapType = 43; // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. // +patchMergeKey=rule // +patchStrategy=merge // +listType=map diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go index 12cc2f6f2c..321bec385c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go @@ -130,7 +130,7 @@ func (s JSON) MarshalJSON() ([]byte, error) { func (s *JSON) UnmarshalJSON(data []byte) error { if len(data) > 0 && !bytes.Equal(data, nullLiteral) { - s.Raw = data + s.Raw = append(s.Raw[0:0], data...) } return nil } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go index e1d1e0be39..212cea6f69 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go @@ -383,6 +383,7 @@ const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s. // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format // <.spec.name>.<.spec.group>. 
@@ -401,6 +402,7 @@ type CustomResourceDefinition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // CustomResourceDefinitionList is a list of CustomResourceDefinition objects. type CustomResourceDefinitionList struct { @@ -469,6 +471,7 @@ type CustomResourceSubresourceScale struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // ConversionReview describes a conversion request/response. type ConversionReview struct { diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go index 5dbdf576b3..197bd1b7a8 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go @@ -189,7 +189,6 @@ type JSONSchemaProps struct { XMapType *string `json:"x-kubernetes-map-type,omitempty" protobuf:"bytes,43,opt,name=xKubernetesMapType"` // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. // +patchMergeKey=rule // +patchStrategy=merge // +listType=map diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..e3acc247c6 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ConversionReview) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CustomResourceDefinition) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *CustomResourceDefinitionList) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 62d7a33dcd..f9e5600345 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -41,7 +41,7 @@ message ConversionRequest { // objects is the list of custom resource objects to be converted. // +listType=atomic - repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; } // ConversionResponse describes a conversion response. @@ -55,14 +55,14 @@ message ConversionResponse { // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. // +listType=atomic - repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` // will be used to construct an error message for the end user. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; } // ConversionReview describes a conversion request/response. @@ -138,7 +138,7 @@ message CustomResourceDefinition { // Standard object's metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec describes how the user wants the resources to appear optional CustomResourceDefinitionSpec spec = 2; @@ -159,7 +159,7 @@ message CustomResourceDefinitionCondition { // lastTransitionTime last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is a unique, one-word, CamelCase reason for the condition's last transition. 
// +optional @@ -175,7 +175,7 @@ message CustomResourceDefinitionList { // Standard object's metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items list individual CustomResourceDefinition objects repeated CustomResourceDefinition items = 2; @@ -634,7 +634,6 @@ message JSONSchemaProps { optional string xKubernetesMapType = 43; // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. // +patchMergeKey=rule // +patchStrategy=merge // +listType=map diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go index 44941d82ef..43b9038787 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go @@ -130,7 +130,7 @@ func (s JSON) MarshalJSON() ([]byte, error) { func (s *JSON) UnmarshalJSON(data []byte) error { if len(data) > 0 && !bytes.Equal(data, nullLiteral) { - s.Raw = data + s.Raw = append(s.Raw[0:0], data...) } return nil } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index 86013e39f7..3ed584dd9d 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -189,7 +189,6 @@ type JSONSchemaProps struct { XMapType *string `json:"x-kubernetes-map-type,omitempty" protobuf:"bytes,43,opt,name=xKubernetesMapType"` // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. // +patchMergeKey=rule // +patchStrategy=merge // +listType=map diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go index b83de1c419..eb77daba35 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CustomResourceColumnDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceColumnDefinition type for use +// CustomResourceColumnDefinitionApplyConfiguration represents a declarative configuration of the CustomResourceColumnDefinition type for use // with apply. 
type CustomResourceColumnDefinitionApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -29,7 +29,7 @@ type CustomResourceColumnDefinitionApplyConfiguration struct { JSONPath *string `json:"jsonPath,omitempty"` } -// CustomResourceColumnDefinitionApplyConfiguration constructs an declarative configuration of the CustomResourceColumnDefinition type for use with +// CustomResourceColumnDefinitionApplyConfiguration constructs a declarative configuration of the CustomResourceColumnDefinition type for use with // apply. func CustomResourceColumnDefinition() *CustomResourceColumnDefinitionApplyConfiguration { return &CustomResourceColumnDefinitionApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go index 8705d1a219..973e614e32 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -// CustomResourceConversionApplyConfiguration represents an declarative configuration of the CustomResourceConversion type for use +// CustomResourceConversionApplyConfiguration represents a declarative configuration of the CustomResourceConversion type for use // with apply. type CustomResourceConversionApplyConfiguration struct { Strategy *v1.ConversionStrategyType `json:"strategy,omitempty"` Webhook *WebhookConversionApplyConfiguration `json:"webhook,omitempty"` } -// CustomResourceConversionApplyConfiguration constructs an declarative configuration of the CustomResourceConversion type for use with +// CustomResourceConversionApplyConfiguration constructs a declarative configuration of the CustomResourceConversion type for use with // apply. func CustomResourceConversion() *CustomResourceConversionApplyConfiguration { return &CustomResourceConversionApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go index f6b6edb7fd..12417b2e62 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CustomResourceDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceDefinition type for use +// CustomResourceDefinitionApplyConfiguration represents a declarative configuration of the CustomResourceDefinition type for use // with apply. 
type CustomResourceDefinitionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -33,7 +33,7 @@ type CustomResourceDefinitionApplyConfiguration struct { Status *CustomResourceDefinitionStatusApplyConfiguration `json:"status,omitempty"` } -// CustomResourceDefinition constructs an declarative configuration of the CustomResourceDefinition type for use with +// CustomResourceDefinition constructs a declarative configuration of the CustomResourceDefinition type for use with // apply. func CustomResourceDefinition(name string) *CustomResourceDefinitionApplyConfiguration { b := &CustomResourceDefinitionApplyConfiguration{} @@ -216,3 +216,9 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithStatus(value *CustomRes b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CustomResourceDefinitionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go index 2cf9dd4e50..fb070cb6a8 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CustomResourceDefinitionConditionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionCondition type for use +// CustomResourceDefinitionConditionApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionCondition type for use // with apply. type CustomResourceDefinitionConditionApplyConfiguration struct { Type *v1.CustomResourceDefinitionConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type CustomResourceDefinitionConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// CustomResourceDefinitionConditionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionCondition type for use with +// CustomResourceDefinitionConditionApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionCondition type for use with // apply. func CustomResourceDefinitionCondition() *CustomResourceDefinitionConditionApplyConfiguration { return &CustomResourceDefinitionConditionApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go index 06b7a40428..ca0c02f0e0 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go @@ -18,7 +18,7 @@ limitations under the License. 
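Reviewer note: the regenerated apply configurations gain a `GetName()` accessor (added above for the v1 `CustomResourceDefinitionApplyConfiguration`; the v1beta1 variant later in this diff gets the same method). A hedged usage sketch, assuming only the vendored packages shown here; the CRD name is illustrative:

```go
package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1"
)

func main() {
	// CustomResourceDefinition(name) seeds ObjectMeta.Name; GetName reads it back
	// without the caller reaching into the embedded ObjectMetaApplyConfiguration.
	crd := apiextensionsv1.CustomResourceDefinition("widgets.example.com")
	if name := crd.GetName(); name != nil {
		fmt.Println(*name) // widgets.example.com
	}
}
```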
package v1 -// CustomResourceDefinitionNamesApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionNames type for use +// CustomResourceDefinitionNamesApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionNames type for use // with apply. type CustomResourceDefinitionNamesApplyConfiguration struct { Plural *string `json:"plural,omitempty"` @@ -29,7 +29,7 @@ type CustomResourceDefinitionNamesApplyConfiguration struct { Categories []string `json:"categories,omitempty"` } -// CustomResourceDefinitionNamesApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionNames type for use with +// CustomResourceDefinitionNamesApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionNames type for use with // apply. func CustomResourceDefinitionNames() *CustomResourceDefinitionNamesApplyConfiguration { return &CustomResourceDefinitionNamesApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go index 0f52e4b16b..9d0573f44b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go @@ -22,7 +22,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -// CustomResourceDefinitionSpecApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionSpec type for use +// CustomResourceDefinitionSpecApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionSpec type for use // with apply. type CustomResourceDefinitionSpecApplyConfiguration struct { Group *string `json:"group,omitempty"` @@ -33,7 +33,7 @@ type CustomResourceDefinitionSpecApplyConfiguration struct { PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty"` } -// CustomResourceDefinitionSpecApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionSpec type for use with +// CustomResourceDefinitionSpecApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionSpec type for use with // apply. func CustomResourceDefinitionSpec() *CustomResourceDefinitionSpecApplyConfiguration { return &CustomResourceDefinitionSpecApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go index a30fb726bc..4fd09be5a0 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CustomResourceDefinitionStatusApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionStatus type for use +// CustomResourceDefinitionStatusApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionStatus type for use // with apply. 
type CustomResourceDefinitionStatusApplyConfiguration struct { Conditions []CustomResourceDefinitionConditionApplyConfiguration `json:"conditions,omitempty"` @@ -26,7 +26,7 @@ type CustomResourceDefinitionStatusApplyConfiguration struct { StoredVersions []string `json:"storedVersions,omitempty"` } -// CustomResourceDefinitionStatusApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionStatus type for use with +// CustomResourceDefinitionStatusApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionStatus type for use with // apply. func CustomResourceDefinitionStatus() *CustomResourceDefinitionStatusApplyConfiguration { return &CustomResourceDefinitionStatusApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go index aaf2a139cf..f96ba88f46 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CustomResourceDefinitionVersionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionVersion type for use +// CustomResourceDefinitionVersionApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionVersion type for use // with apply. type CustomResourceDefinitionVersionApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type CustomResourceDefinitionVersionApplyConfiguration struct { SelectableFields []SelectableFieldApplyConfiguration `json:"selectableFields,omitempty"` } -// CustomResourceDefinitionVersionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionVersion type for use with +// CustomResourceDefinitionVersionApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionVersion type for use with // apply. func CustomResourceDefinitionVersion() *CustomResourceDefinitionVersionApplyConfiguration { return &CustomResourceDefinitionVersionApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go index e91ede1782..e66e710c40 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -// CustomResourceSubresourcesApplyConfiguration represents an declarative configuration of the CustomResourceSubresources type for use +// CustomResourceSubresourcesApplyConfiguration represents a declarative configuration of the CustomResourceSubresources type for use // with apply. 
type CustomResourceSubresourcesApplyConfiguration struct { Status *v1.CustomResourceSubresourceStatus `json:"status,omitempty"` Scale *CustomResourceSubresourceScaleApplyConfiguration `json:"scale,omitempty"` } -// CustomResourceSubresourcesApplyConfiguration constructs an declarative configuration of the CustomResourceSubresources type for use with +// CustomResourceSubresourcesApplyConfiguration constructs a declarative configuration of the CustomResourceSubresources type for use with // apply. func CustomResourceSubresources() *CustomResourceSubresourcesApplyConfiguration { return &CustomResourceSubresourcesApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go index 8159cec2a7..7859675fdd 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CustomResourceSubresourceScaleApplyConfiguration represents an declarative configuration of the CustomResourceSubresourceScale type for use +// CustomResourceSubresourceScaleApplyConfiguration represents a declarative configuration of the CustomResourceSubresourceScale type for use // with apply. type CustomResourceSubresourceScaleApplyConfiguration struct { SpecReplicasPath *string `json:"specReplicasPath,omitempty"` @@ -26,7 +26,7 @@ type CustomResourceSubresourceScaleApplyConfiguration struct { LabelSelectorPath *string `json:"labelSelectorPath,omitempty"` } -// CustomResourceSubresourceScaleApplyConfiguration constructs an declarative configuration of the CustomResourceSubresourceScale type for use with +// CustomResourceSubresourceScaleApplyConfiguration constructs a declarative configuration of the CustomResourceSubresourceScale type for use with // apply. func CustomResourceSubresourceScale() *CustomResourceSubresourceScaleApplyConfiguration { return &CustomResourceSubresourceScaleApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go index 2e0bcbcb5f..6a8cf17d55 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// CustomResourceValidationApplyConfiguration represents an declarative configuration of the CustomResourceValidation type for use +// CustomResourceValidationApplyConfiguration represents a declarative configuration of the CustomResourceValidation type for use // with apply. type CustomResourceValidationApplyConfiguration struct { OpenAPIV3Schema *JSONSchemaPropsApplyConfiguration `json:"openAPIV3Schema,omitempty"` } -// CustomResourceValidationApplyConfiguration constructs an declarative configuration of the CustomResourceValidation type for use with +// CustomResourceValidationApplyConfiguration constructs a declarative configuration of the CustomResourceValidation type for use with // apply. 
func CustomResourceValidation() *CustomResourceValidationApplyConfiguration { return &CustomResourceValidationApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go index 61856a15ce..761a957a02 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ExternalDocumentationApplyConfiguration represents an declarative configuration of the ExternalDocumentation type for use +// ExternalDocumentationApplyConfiguration represents a declarative configuration of the ExternalDocumentation type for use // with apply. type ExternalDocumentationApplyConfiguration struct { Description *string `json:"description,omitempty"` URL *string `json:"url,omitempty"` } -// ExternalDocumentationApplyConfiguration constructs an declarative configuration of the ExternalDocumentation type for use with +// ExternalDocumentationApplyConfiguration constructs a declarative configuration of the ExternalDocumentation type for use with // apply. func ExternalDocumentation() *ExternalDocumentationApplyConfiguration { return &ExternalDocumentationApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go index 730203add1..1acbe61307 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -// JSONSchemaPropsApplyConfiguration represents an declarative configuration of the JSONSchemaProps type for use +// JSONSchemaPropsApplyConfiguration represents a declarative configuration of the JSONSchemaProps type for use // with apply. type JSONSchemaPropsApplyConfiguration struct { ID *string `json:"id,omitempty"` @@ -71,7 +71,7 @@ type JSONSchemaPropsApplyConfiguration struct { XValidations *v1.ValidationRules `json:"x-kubernetes-validations,omitempty"` } -// JSONSchemaPropsApplyConfiguration constructs an declarative configuration of the JSONSchemaProps type for use with +// JSONSchemaPropsApplyConfiguration constructs a declarative configuration of the JSONSchemaProps type for use with // apply. func JSONSchemaProps() *JSONSchemaPropsApplyConfiguration { return &JSONSchemaPropsApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go index 876dfa71c7..33f655a764 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1 -// SelectableFieldApplyConfiguration represents an declarative configuration of the SelectableField type for use +// SelectableFieldApplyConfiguration represents a declarative configuration of the SelectableField type for use // with apply. type SelectableFieldApplyConfiguration struct { JSONPath *string `json:"jsonPath,omitempty"` } -// SelectableFieldApplyConfiguration constructs an declarative configuration of the SelectableField type for use with +// SelectableFieldApplyConfiguration constructs a declarative configuration of the SelectableField type for use with // apply. func SelectableField() *SelectableFieldApplyConfiguration { return &SelectableFieldApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go index 2cd55d9ea2..239780664d 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go index c0eb0b51bd..e5fc80c7e9 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -// ValidationRuleApplyConfiguration represents an declarative configuration of the ValidationRule type for use +// ValidationRuleApplyConfiguration represents a declarative configuration of the ValidationRule type for use // with apply. type ValidationRuleApplyConfiguration struct { Rule *string `json:"rule,omitempty"` @@ -33,7 +33,7 @@ type ValidationRuleApplyConfiguration struct { OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` } -// ValidationRuleApplyConfiguration constructs an declarative configuration of the ValidationRule type for use with +// ValidationRuleApplyConfiguration constructs a declarative configuration of the ValidationRule type for use with // apply. 
func ValidationRule() *ValidationRuleApplyConfiguration { return &ValidationRuleApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go index aa358ae205..77f2227b95 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go index 2af1b70ba4..884fbc5fa8 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// WebhookConversionApplyConfiguration represents an declarative configuration of the WebhookConversion type for use +// WebhookConversionApplyConfiguration represents a declarative configuration of the WebhookConversion type for use // with apply. type WebhookConversionApplyConfiguration struct { ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"` ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty"` } -// WebhookConversionApplyConfiguration constructs an declarative configuration of the WebhookConversion type for use with +// WebhookConversionApplyConfiguration constructs a declarative configuration of the WebhookConversion type for use with // apply. func WebhookConversion() *WebhookConversionApplyConfiguration { return &WebhookConversionApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go index e473178434..9ee2318d1a 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// CustomResourceColumnDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceColumnDefinition type for use +// CustomResourceColumnDefinitionApplyConfiguration represents a declarative configuration of the CustomResourceColumnDefinition type for use // with apply. type CustomResourceColumnDefinitionApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -29,7 +29,7 @@ type CustomResourceColumnDefinitionApplyConfiguration struct { JSONPath *string `json:"JSONPath,omitempty"` } -// CustomResourceColumnDefinitionApplyConfiguration constructs an declarative configuration of the CustomResourceColumnDefinition type for use with +// CustomResourceColumnDefinitionApplyConfiguration constructs a declarative configuration of the CustomResourceColumnDefinition type for use with // apply. func CustomResourceColumnDefinition() *CustomResourceColumnDefinitionApplyConfiguration { return &CustomResourceColumnDefinitionApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go index d9825f85a9..b0171f16a0 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) -// CustomResourceConversionApplyConfiguration represents an declarative configuration of the CustomResourceConversion type for use +// CustomResourceConversionApplyConfiguration represents a declarative configuration of the CustomResourceConversion type for use // with apply. type CustomResourceConversionApplyConfiguration struct { Strategy *v1beta1.ConversionStrategyType `json:"strategy,omitempty"` @@ -30,7 +30,7 @@ type CustomResourceConversionApplyConfiguration struct { ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty"` } -// CustomResourceConversionApplyConfiguration constructs an declarative configuration of the CustomResourceConversion type for use with +// CustomResourceConversionApplyConfiguration constructs a declarative configuration of the CustomResourceConversion type for use with // apply. func CustomResourceConversion() *CustomResourceConversionApplyConfiguration { return &CustomResourceConversionApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go index 9117748c75..bbda379324 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CustomResourceDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceDefinition type for use +// CustomResourceDefinitionApplyConfiguration represents a declarative configuration of the CustomResourceDefinition type for use // with apply. 
type CustomResourceDefinitionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -33,7 +33,7 @@ type CustomResourceDefinitionApplyConfiguration struct { Status *CustomResourceDefinitionStatusApplyConfiguration `json:"status,omitempty"` } -// CustomResourceDefinition constructs an declarative configuration of the CustomResourceDefinition type for use with +// CustomResourceDefinition constructs a declarative configuration of the CustomResourceDefinition type for use with // apply. func CustomResourceDefinition(name string) *CustomResourceDefinitionApplyConfiguration { b := &CustomResourceDefinitionApplyConfiguration{} @@ -216,3 +216,9 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithStatus(value *CustomRes b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CustomResourceDefinitionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go index cf2400c9c2..7f2f132ac7 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CustomResourceDefinitionConditionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionCondition type for use +// CustomResourceDefinitionConditionApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionCondition type for use // with apply. type CustomResourceDefinitionConditionApplyConfiguration struct { Type *v1beta1.CustomResourceDefinitionConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type CustomResourceDefinitionConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// CustomResourceDefinitionConditionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionCondition type for use with +// CustomResourceDefinitionConditionApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionCondition type for use with // apply. func CustomResourceDefinitionCondition() *CustomResourceDefinitionConditionApplyConfiguration { return &CustomResourceDefinitionConditionApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go index a20200723c..44b49bcbbe 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// CustomResourceDefinitionNamesApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionNames type for use +// CustomResourceDefinitionNamesApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionNames type for use // with apply. type CustomResourceDefinitionNamesApplyConfiguration struct { Plural *string `json:"plural,omitempty"` @@ -29,7 +29,7 @@ type CustomResourceDefinitionNamesApplyConfiguration struct { Categories []string `json:"categories,omitempty"` } -// CustomResourceDefinitionNamesApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionNames type for use with +// CustomResourceDefinitionNamesApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionNames type for use with // apply. func CustomResourceDefinitionNames() *CustomResourceDefinitionNamesApplyConfiguration { return &CustomResourceDefinitionNamesApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go index 49f4e433c7..5046882ae1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go @@ -22,7 +22,7 @@ import ( apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) -// CustomResourceDefinitionSpecApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionSpec type for use +// CustomResourceDefinitionSpecApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionSpec type for use // with apply. type CustomResourceDefinitionSpecApplyConfiguration struct { Group *string `json:"group,omitempty"` @@ -38,7 +38,7 @@ type CustomResourceDefinitionSpecApplyConfiguration struct { PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty"` } -// CustomResourceDefinitionSpecApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionSpec type for use with +// CustomResourceDefinitionSpecApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionSpec type for use with // apply. func CustomResourceDefinitionSpec() *CustomResourceDefinitionSpecApplyConfiguration { return &CustomResourceDefinitionSpecApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go index 79b2ebdabb..2c9c5e23c1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// CustomResourceDefinitionStatusApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionStatus type for use +// CustomResourceDefinitionStatusApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionStatus type for use // with apply. type CustomResourceDefinitionStatusApplyConfiguration struct { Conditions []CustomResourceDefinitionConditionApplyConfiguration `json:"conditions,omitempty"` @@ -26,7 +26,7 @@ type CustomResourceDefinitionStatusApplyConfiguration struct { StoredVersions []string `json:"storedVersions,omitempty"` } -// CustomResourceDefinitionStatusApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionStatus type for use with +// CustomResourceDefinitionStatusApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionStatus type for use with // apply. func CustomResourceDefinitionStatus() *CustomResourceDefinitionStatusApplyConfiguration { return &CustomResourceDefinitionStatusApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go index e110a1ec5b..19ac26b039 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// CustomResourceDefinitionVersionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionVersion type for use +// CustomResourceDefinitionVersionApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionVersion type for use // with apply. type CustomResourceDefinitionVersionApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type CustomResourceDefinitionVersionApplyConfiguration struct { SelectableFields []SelectableFieldApplyConfiguration `json:"selectableFields,omitempty"` } -// CustomResourceDefinitionVersionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionVersion type for use with +// CustomResourceDefinitionVersionApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionVersion type for use with // apply. 
func CustomResourceDefinitionVersion() *CustomResourceDefinitionVersionApplyConfiguration { return &CustomResourceDefinitionVersionApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go index a62f8a2050..3847b8789c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) -// CustomResourceSubresourcesApplyConfiguration represents an declarative configuration of the CustomResourceSubresources type for use +// CustomResourceSubresourcesApplyConfiguration represents a declarative configuration of the CustomResourceSubresources type for use // with apply. type CustomResourceSubresourcesApplyConfiguration struct { Status *v1beta1.CustomResourceSubresourceStatus `json:"status,omitempty"` Scale *CustomResourceSubresourceScaleApplyConfiguration `json:"scale,omitempty"` } -// CustomResourceSubresourcesApplyConfiguration constructs an declarative configuration of the CustomResourceSubresources type for use with +// CustomResourceSubresourcesApplyConfiguration constructs a declarative configuration of the CustomResourceSubresources type for use with // apply. func CustomResourceSubresources() *CustomResourceSubresourcesApplyConfiguration { return &CustomResourceSubresourcesApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go index 72934ce97e..b94d0e6685 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// CustomResourceSubresourceScaleApplyConfiguration represents an declarative configuration of the CustomResourceSubresourceScale type for use +// CustomResourceSubresourceScaleApplyConfiguration represents a declarative configuration of the CustomResourceSubresourceScale type for use // with apply. type CustomResourceSubresourceScaleApplyConfiguration struct { SpecReplicasPath *string `json:"specReplicasPath,omitempty"` @@ -26,7 +26,7 @@ type CustomResourceSubresourceScaleApplyConfiguration struct { LabelSelectorPath *string `json:"labelSelectorPath,omitempty"` } -// CustomResourceSubresourceScaleApplyConfiguration constructs an declarative configuration of the CustomResourceSubresourceScale type for use with +// CustomResourceSubresourceScaleApplyConfiguration constructs a declarative configuration of the CustomResourceSubresourceScale type for use with // apply. 
func CustomResourceSubresourceScale() *CustomResourceSubresourceScaleApplyConfiguration { return &CustomResourceSubresourceScaleApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go index 9f65653da0..a5cf3c096b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// CustomResourceValidationApplyConfiguration represents an declarative configuration of the CustomResourceValidation type for use +// CustomResourceValidationApplyConfiguration represents a declarative configuration of the CustomResourceValidation type for use // with apply. type CustomResourceValidationApplyConfiguration struct { OpenAPIV3Schema *JSONSchemaPropsApplyConfiguration `json:"openAPIV3Schema,omitempty"` } -// CustomResourceValidationApplyConfiguration constructs an declarative configuration of the CustomResourceValidation type for use with +// CustomResourceValidationApplyConfiguration constructs a declarative configuration of the CustomResourceValidation type for use with // apply. func CustomResourceValidation() *CustomResourceValidationApplyConfiguration { return &CustomResourceValidationApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go index 360f6d9695..5140d66ceb 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ExternalDocumentationApplyConfiguration represents an declarative configuration of the ExternalDocumentation type for use +// ExternalDocumentationApplyConfiguration represents a declarative configuration of the ExternalDocumentation type for use // with apply. type ExternalDocumentationApplyConfiguration struct { Description *string `json:"description,omitempty"` URL *string `json:"url,omitempty"` } -// ExternalDocumentationApplyConfiguration constructs an declarative configuration of the ExternalDocumentation type for use with +// ExternalDocumentationApplyConfiguration constructs a declarative configuration of the ExternalDocumentation type for use with // apply. 
func ExternalDocumentation() *ExternalDocumentationApplyConfiguration { return &ExternalDocumentationApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go index 158b5750d0..9c588e2a12 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) -// JSONSchemaPropsApplyConfiguration represents an declarative configuration of the JSONSchemaProps type for use +// JSONSchemaPropsApplyConfiguration represents a declarative configuration of the JSONSchemaProps type for use // with apply. type JSONSchemaPropsApplyConfiguration struct { ID *string `json:"id,omitempty"` @@ -71,7 +71,7 @@ type JSONSchemaPropsApplyConfiguration struct { XValidations *v1beta1.ValidationRules `json:"x-kubernetes-validations,omitempty"` } -// JSONSchemaPropsApplyConfiguration constructs an declarative configuration of the JSONSchemaProps type for use with +// JSONSchemaPropsApplyConfiguration constructs a declarative configuration of the JSONSchemaProps type for use with // apply. func JSONSchemaProps() *JSONSchemaPropsApplyConfiguration { return &JSONSchemaPropsApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go index 8729d9586b..1a372e6faf 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// SelectableFieldApplyConfiguration represents an declarative configuration of the SelectableField type for use +// SelectableFieldApplyConfiguration represents a declarative configuration of the SelectableField type for use // with apply. type SelectableFieldApplyConfiguration struct { JSONPath *string `json:"jsonPath,omitempty"` } -// SelectableFieldApplyConfiguration constructs an declarative configuration of the SelectableField type for use with +// SelectableFieldApplyConfiguration constructs a declarative configuration of the SelectableField type for use with // apply. func SelectableField() *SelectableFieldApplyConfiguration { return &SelectableFieldApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go index c21b574908..70cc6b5b27 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go index 1b0df078b5..e245ded1a1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) -// ValidationRuleApplyConfiguration represents an declarative configuration of the ValidationRule type for use +// ValidationRuleApplyConfiguration represents a declarative configuration of the ValidationRule type for use // with apply. type ValidationRuleApplyConfiguration struct { Rule *string `json:"rule,omitempty"` @@ -33,7 +33,7 @@ type ValidationRuleApplyConfiguration struct { OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` } -// ValidationRuleApplyConfiguration constructs an declarative configuration of the ValidationRule type for use with +// ValidationRuleApplyConfiguration constructs a declarative configuration of the ValidationRule type for use with // apply. func ValidationRule() *ValidationRuleApplyConfiguration { return &ValidationRuleApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go index 490f9d5f3f..76ff71b4ae 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. 
func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go index 3949426cb7..03d3dea820 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go @@ -20,9 +20,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1" @@ -30,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. @@ -43,6 +40,7 @@ type CustomResourceDefinitionsGetter interface { type CustomResourceDefinitionInterface interface { Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (*v1.CustomResourceDefinition, error) Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type CustomResourceDefinitionInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) Apply(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) CustomResourceDefinitionExpansion } // customResourceDefinitions implements CustomResourceDefinitionInterface type customResourceDefinitions struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.CustomResourceDefinition, *v1.CustomResourceDefinitionList, *apiextensionsv1.CustomResourceDefinitionApplyConfiguration] } // newCustomResourceDefinitions returns a CustomResourceDefinitions func newCustomResourceDefinitions(c *ApiextensionsV1Client) *customResourceDefinitions { return &customResourceDefinitions{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.CustomResourceDefinition, *v1.CustomResourceDefinitionList, *apiextensionsv1.CustomResourceDefinitionApplyConfiguration]( + "customresourcedefinitions", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.CustomResourceDefinition { return &v1.CustomResourceDefinition{} }, + func() *v1.CustomResourceDefinitionList { return &v1.CustomResourceDefinitionList{} }), } } - -// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. -func (c *customResourceDefinitions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CustomResourceDefinition, err error) { - result = &v1.CustomResourceDefinition{} - err = c.client.Get(). - Resource("customresourcedefinitions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. -func (c *customResourceDefinitions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CustomResourceDefinitionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CustomResourceDefinitionList{} - err = c.client.Get(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested customResourceDefinitions. -func (c *customResourceDefinitions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (result *v1.CustomResourceDefinition, err error) { - result = &v1.CustomResourceDefinition{} - err = c.client.Post(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a customResourceDefinition and updates it. 
Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { - result = &v1.CustomResourceDefinition{} - err = c.client.Put(). - Resource("customresourcedefinitions"). - Name(customResourceDefinition.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { - result = &v1.CustomResourceDefinition{} - err = c.client.Put(). - Resource("customresourcedefinitions"). - Name(customResourceDefinition.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. -func (c *customResourceDefinitions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("customresourcedefinitions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("customresourcedefinitions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched customResourceDefinition. -func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) { - result = &v1.CustomResourceDefinition{} - err = c.client.Patch(pt). - Resource("customresourcedefinitions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied customResourceDefinition. -func (c *customResourceDefinitions) Apply(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) { - if customResourceDefinition == nil { - return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(customResourceDefinition) - if err != nil { - return nil, err - } - name := customResourceDefinition.Name - if name == nil { - return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") - } - result = &v1.CustomResourceDefinition{} - err = c.client.Patch(types.ApplyPatchType). - Resource("customresourcedefinitions"). - Name(*name). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *customResourceDefinitions) ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) { - if customResourceDefinition == nil { - return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(customResourceDefinition) - if err != nil { - return nil, err - } - - name := customResourceDefinition.Name - if name == nil { - return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") - } - - result = &v1.CustomResourceDefinition{} - err = c.client.Patch(types.ApplyPatchType). - Resource("customresourcedefinitions"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go index 0e5b482a30..47f9655975 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go @@ -20,9 +20,6 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1" @@ -30,7 +27,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. @@ -43,6 +40,7 @@ type CustomResourceDefinitionsGetter interface { type CustomResourceDefinitionInterface interface { Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (*v1beta1.CustomResourceDefinition, error) Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type CustomResourceDefinitionInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) Apply(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) CustomResourceDefinitionExpansion } // customResourceDefinitions implements CustomResourceDefinitionInterface type customResourceDefinitions struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.CustomResourceDefinition, *v1beta1.CustomResourceDefinitionList, *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration] } // newCustomResourceDefinitions returns a CustomResourceDefinitions func newCustomResourceDefinitions(c *ApiextensionsV1beta1Client) *customResourceDefinitions { return &customResourceDefinitions{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.CustomResourceDefinition, *v1beta1.CustomResourceDefinitionList, *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration]( + "customresourcedefinitions", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.CustomResourceDefinition { return &v1beta1.CustomResourceDefinition{} }, + func() *v1beta1.CustomResourceDefinitionList { return &v1beta1.CustomResourceDefinitionList{} }), } } - -// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. -func (c *customResourceDefinitions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CustomResourceDefinition, err error) { - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Get(). - Resource("customresourcedefinitions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. -func (c *customResourceDefinitions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CustomResourceDefinitionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CustomResourceDefinitionList{} - err = c.client.Get(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested customResourceDefinitions. 
-func (c *customResourceDefinitions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (result *v1beta1.CustomResourceDefinition, err error) { - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Post(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Put(). - Resource("customresourcedefinitions"). - Name(customResourceDefinition.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Put(). - Resource("customresourcedefinitions"). - Name(customResourceDefinition.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. -func (c *customResourceDefinitions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("customresourcedefinitions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("customresourcedefinitions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched customResourceDefinition. 
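Although the handwritten REST plumbing removed here is replaced by the embedded generic gentype.ClientWithListAndApply, the exported CustomResourceDefinitionInterface is unchanged, so existing callers keep compiling. A minimal sketch of such a caller (the kubeconfig path and error handling are illustrative, not part of this change):

package main

import (
	"context"
	"fmt"

	clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig file (path is illustrative).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// List CRDs; with the regenerated client this call is served by
	// gentype.ClientWithListAndApply rather than the handwritten methods.
	crds, err := cs.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, crd := range crds.Items {
		fmt.Println(crd.Name)
	}
}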
-func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) { - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Patch(pt). - Resource("customresourcedefinitions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied customResourceDefinition. -func (c *customResourceDefinitions) Apply(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) { - if customResourceDefinition == nil { - return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(customResourceDefinition) - if err != nil { - return nil, err - } - name := customResourceDefinition.Name - if name == nil { - return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") - } - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Patch(types.ApplyPatchType). - Resource("customresourcedefinitions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *customResourceDefinitions) ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) { - if customResourceDefinition == nil { - return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(customResourceDefinition) - if err != nil { - return nil, err - } - - name := customResourceDefinition.Name - if name == nil { - return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") - } - - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Patch(types.ApplyPatchType). - Resource("customresourcedefinitions"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go new file mode 100644 index 0000000000..72c6438cb6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go @@ -0,0 +1,165 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testrestmapper + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// TestOnlyStaticRESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order: +// 1. legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy +// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, +// all other groups alphabetical. +// +// TODO callers of this method should be updated to build their own specific restmapper based on their scheme for their tests +// TODO the things being tested are related to whether various cases are handled, not tied to the particular types being checked. +func TestOnlyStaticRESTMapper(scheme *runtime.Scheme, versionPatterns ...schema.GroupVersion) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + unionedGroups := sets.NewString() + for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() { + if !unionedGroups.Has(enabledVersion.Group) { + unionedGroups.Insert(enabledVersion.Group) + unionMapper = append(unionMapper, newRESTMapper(enabledVersion.Group, scheme)) + } + } + + if len(versionPatterns) != 0 { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + for _, versionPriority := range versionPatterns { + resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) + } + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} + } + + prioritizedGroups := []string{"", "extensions", "metrics"} + resourcePriority, kindPriority := prioritiesForGroups(scheme, prioritizedGroups...) + + prioritizedGroupsSet := sets.NewString(prioritizedGroups...) + remainingGroups := sets.String{} + for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() { + if !prioritizedGroupsSet.Has(enabledVersion.Group) { + remainingGroups.Insert(enabledVersion.Group) + } + } + + remainingResourcePriority, remainingKindPriority := prioritiesForGroups(scheme, remainingGroups.List()...) + resourcePriority = append(resourcePriority, remainingResourcePriority...) + kindPriority = append(kindPriority, remainingKindPriority...) + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} +} + +// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, +// then any non-preferred version of the group second. 
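A minimal sketch of how a test might consume the new helper (the group/kind looked up and the use of the client-go scheme are illustrative; per the TODO above, tests are expected to eventually build their own mapper from their own scheme):

package example_test

import (
	"testing"

	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes/scheme"
)

func TestDeploymentMapping(t *testing.T) {
	// Build a RESTMapper covering every group/version registered in the scheme.
	mapper := testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme)

	m, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
	if err != nil {
		t.Fatal(err)
	}
	// The resource name is guessed from the kind, so Deployment maps to "deployments".
	if m.Resource.Resource != "deployments" {
		t.Fatalf("unexpected resource %q", m.Resource.Resource)
	}
}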
+func prioritiesForGroups(scheme *runtime.Scheme, groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + + for _, group := range groups { + availableVersions := scheme.PrioritizedVersionsForGroup(group) + if len(availableVersions) > 0 { + resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) + } + } + for _, group := range groups { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) + kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) + } + + return resourcePriority, kindPriority +} + +func newRESTMapper(group string, scheme *runtime.Scheme) meta.RESTMapper { + mapper := meta.NewDefaultRESTMapper(scheme.PrioritizedVersionsForGroup(group)) + for _, gv := range scheme.PrioritizedVersionsForGroup(group) { + for kind := range scheme.KnownTypes(gv) { + if ignoredKinds.Has(kind) { + continue + } + scope := meta.RESTScopeNamespace + if rootScopedKinds[gv.WithKind(kind).GroupKind()] { + scope = meta.RESTScopeRoot + } + mapper.Add(gv.WithKind(kind), scope) + } + } + + return mapper +} + +// hardcoded is good enough for the test we're running +var rootScopedKinds = map[schema.GroupKind]bool{ + {Group: "admission.k8s.io", Kind: "AdmissionReview"}: true, + + {Group: "admissionregistration.k8s.io", Kind: "ValidatingWebhookConfiguration"}: true, + {Group: "admissionregistration.k8s.io", Kind: "MutatingWebhookConfiguration"}: true, + + {Group: "authentication.k8s.io", Kind: "TokenReview"}: true, + + {Group: "authorization.k8s.io", Kind: "SubjectAccessReview"}: true, + {Group: "authorization.k8s.io", Kind: "SelfSubjectAccessReview"}: true, + {Group: "authorization.k8s.io", Kind: "SelfSubjectRulesReview"}: true, + + {Group: "certificates.k8s.io", Kind: "CertificateSigningRequest"}: true, + + {Group: "", Kind: "Node"}: true, + {Group: "", Kind: "Namespace"}: true, + {Group: "", Kind: "PersistentVolume"}: true, + {Group: "", Kind: "ComponentStatus"}: true, + + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: true, + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRoleBinding"}: true, + + {Group: "scheduling.k8s.io", Kind: "PriorityClass"}: true, + + {Group: "storage.k8s.io", Kind: "StorageClass"}: true, + {Group: "storage.k8s.io", Kind: "VolumeAttachment"}: true, + + {Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"}: true, + + {Group: "apiserver.k8s.io", Kind: "AdmissionConfiguration"}: true, + + {Group: "audit.k8s.io", Kind: "Event"}: true, + {Group: "audit.k8s.io", Kind: "Policy"}: true, + + {Group: "apiregistration.k8s.io", Kind: "APIService"}: true, + + {Group: "metrics.k8s.io", Kind: "NodeMetrics"}: true, + + {Group: "wardle.example.com", Kind: "Fischer"}: true, +} + +// hardcoded is good enough for the test we're running +var ignoredKinds = sets.NewString( + "ListOptions", + "DeleteOptions", + "Status", + "PodLogOptions", + "PodExecOptions", + "PodAttachOptions", + "PodPortForwardOptions", + "PodProxyOptions", + "NodeProxyOptions", + "ServiceProxyOptions", +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 69f1bc336d..50af8334f0 100644 --- 
a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -25,6 +25,8 @@ import ( "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + inf "gopkg.in/inf.v0" ) @@ -683,6 +685,12 @@ func (q Quantity) MarshalJSON() ([]byte, error) { return result, nil } +func (q Quantity) MarshalCBOR() ([]byte, error) { + // The call to String() should never return the string "" because the receiver's + // address will never be nil. + return cbor.Marshal(q.String()) +} + // ToUnstructured implements the value.UnstructuredConverter interface. func (q Quantity) ToUnstructured() interface{} { return q.String() @@ -711,6 +719,27 @@ func (q *Quantity) UnmarshalJSON(value []byte) error { return nil } +func (q *Quantity) UnmarshalCBOR(value []byte) error { + var s *string + if err := cbor.Unmarshal(value, &s); err != nil { + return err + } + + if s == nil { + q.d.Dec = nil + q.i = int64Amount{} + return nil + } + + parsed, err := ParseQuantity(strings.TrimSpace(*s)) + if err != nil { + return err + } + + *q = parsed + return nil +} + // NewDecimalQuantity returns a new Quantity representing the given // value in the given format. func NewDecimalQuantity(b inf.Dec, format Format) *Quantity { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go index 15b45ffa84..5005beb12d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -18,6 +18,7 @@ package v1 import ( "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" ) // IsControlledBy checks if the object has a controllerRef set to the given owner @@ -36,10 +37,14 @@ func GetControllerOf(controllee Object) *OwnerReference { return nil } cp := *ref + cp.Controller = ptr.To(*ref.Controller) + if ref.BlockOwnerDeletion != nil { + cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion) + } return &cp } -// GetControllerOf returns a pointer to the controllerRef if controllee has a controller +// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller func GetControllerOfNoCopy(controllee Object) *OwnerReference { refs := controllee.GetOwnerReferences() for i := range refs { @@ -52,14 +57,12 @@ func GetControllerOfNoCopy(controllee Object) *OwnerReference { // NewControllerRef creates an OwnerReference pointing to the given owner. 
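The new MarshalCBOR/UnmarshalCBOR methods encode a Quantity as a CBOR text string, mirroring its JSON form. A minimal round-trip sketch, assuming the bumped k8s.io/apimachinery is on the module path:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("500Mi")

	// Encodes the canonical string form ("500Mi") as a CBOR text string.
	data, err := q.MarshalCBOR()
	if err != nil {
		panic(err)
	}

	// Decodes the CBOR text string back through ParseQuantity.
	var out resource.Quantity
	if err := out.UnmarshalCBOR(data); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // 500Mi
}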
func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference { - blockOwnerDeletion := true - isController := true return &OwnerReference{ APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind, Name: owner.GetName(), UID: owner.GetUID(), - BlockOwnerDeletion: &blockOwnerDeletion, - Controller: &isController, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), } } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 75b88890f6..229ea2c2c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -329,10 +329,38 @@ func (m *Duration) XXX_DiscardUnknown() { var xxx_messageInfo_Duration proto.InternalMessageInfo +func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} } +func (*FieldSelectorRequirement) ProtoMessage() {} +func (*FieldSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_a8431b6e0aeeb761, []int{10} +} +func (m *FieldSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldSelectorRequirement.Merge(m, src) +} +func (m *FieldSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *FieldSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_FieldSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldSelectorRequirement proto.InternalMessageInfo + func (m *FieldsV1) Reset() { *m = FieldsV1{} } func (*FieldsV1) ProtoMessage() {} func (*FieldsV1) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{10} + return fileDescriptor_a8431b6e0aeeb761, []int{11} } func (m *FieldsV1) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +388,7 @@ var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo func (m *GetOptions) Reset() { *m = GetOptions{} } func (*GetOptions) ProtoMessage() {} func (*GetOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{11} + return fileDescriptor_a8431b6e0aeeb761, []int{12} } func (m *GetOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +416,7 @@ var xxx_messageInfo_GetOptions proto.InternalMessageInfo func (m *GroupKind) Reset() { *m = GroupKind{} } func (*GroupKind) ProtoMessage() {} func (*GroupKind) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{12} + return fileDescriptor_a8431b6e0aeeb761, []int{13} } func (m *GroupKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +444,7 @@ var xxx_messageInfo_GroupKind proto.InternalMessageInfo func (m *GroupResource) Reset() { *m = GroupResource{} } func (*GroupResource) ProtoMessage() {} func (*GroupResource) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{13} + return fileDescriptor_a8431b6e0aeeb761, []int{14} } func (m *GroupResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +472,7 @@ var xxx_messageInfo_GroupResource proto.InternalMessageInfo func (m *GroupVersion) Reset() { *m = GroupVersion{} } func (*GroupVersion) ProtoMessage() {} func (*GroupVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, 
[]int{14} + return fileDescriptor_a8431b6e0aeeb761, []int{15} } func (m *GroupVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +500,7 @@ var xxx_messageInfo_GroupVersion proto.InternalMessageInfo func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{15} + return fileDescriptor_a8431b6e0aeeb761, []int{16} } func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +528,7 @@ var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } func (*GroupVersionKind) ProtoMessage() {} func (*GroupVersionKind) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{16} + return fileDescriptor_a8431b6e0aeeb761, []int{17} } func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +556,7 @@ var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } func (*GroupVersionResource) ProtoMessage() {} func (*GroupVersionResource) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{17} + return fileDescriptor_a8431b6e0aeeb761, []int{18} } func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +584,7 @@ var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo func (m *LabelSelector) Reset() { *m = LabelSelector{} } func (*LabelSelector) ProtoMessage() {} func (*LabelSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{18} + return fileDescriptor_a8431b6e0aeeb761, []int{19} } func (m *LabelSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +612,7 @@ var xxx_messageInfo_LabelSelector proto.InternalMessageInfo func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{19} + return fileDescriptor_a8431b6e0aeeb761, []int{20} } func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +640,7 @@ var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{20} + return fileDescriptor_a8431b6e0aeeb761, []int{21} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +668,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *ListMeta) Reset() { *m = ListMeta{} } func (*ListMeta) ProtoMessage() {} func (*ListMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{21} + return fileDescriptor_a8431b6e0aeeb761, []int{22} } func (m *ListMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +696,7 @@ var xxx_messageInfo_ListMeta proto.InternalMessageInfo func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} func (*ListOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{22} + return fileDescriptor_a8431b6e0aeeb761, []int{23} } func (m *ListOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-696,7 +724,7 @@ var xxx_messageInfo_ListOptions proto.InternalMessageInfo func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } func (*ManagedFieldsEntry) ProtoMessage() {} func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{23} + return fileDescriptor_a8431b6e0aeeb761, []int{24} } func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +752,7 @@ var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo func (m *MicroTime) Reset() { *m = MicroTime{} } func (*MicroTime) ProtoMessage() {} func (*MicroTime) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{24} + return fileDescriptor_a8431b6e0aeeb761, []int{25} } func (m *MicroTime) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MicroTime.Unmarshal(m, b) @@ -747,7 +775,7 @@ var xxx_messageInfo_MicroTime proto.InternalMessageInfo func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} func (*ObjectMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{25} + return fileDescriptor_a8431b6e0aeeb761, []int{26} } func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -775,7 +803,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} func (*OwnerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{26} + return fileDescriptor_a8431b6e0aeeb761, []int{27} } func (m *OwnerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -803,7 +831,7 @@ var xxx_messageInfo_OwnerReference proto.InternalMessageInfo func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } func (*PartialObjectMetadata) ProtoMessage() {} func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{27} + return fileDescriptor_a8431b6e0aeeb761, []int{28} } func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -831,7 +859,7 @@ var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{28} + return fileDescriptor_a8431b6e0aeeb761, []int{29} } func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -859,7 +887,7 @@ var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo func (m *Patch) Reset() { *m = Patch{} } func (*Patch) ProtoMessage() {} func (*Patch) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{29} + return fileDescriptor_a8431b6e0aeeb761, []int{30} } func (m *Patch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -887,7 +915,7 @@ var xxx_messageInfo_Patch proto.InternalMessageInfo func (m *PatchOptions) Reset() { *m = PatchOptions{} } func (*PatchOptions) ProtoMessage() {} func (*PatchOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{30} + return fileDescriptor_a8431b6e0aeeb761, []int{31} } func (m *PatchOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -915,7 +943,7 @@ var xxx_messageInfo_PatchOptions proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) 
ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{31} + return fileDescriptor_a8431b6e0aeeb761, []int{32} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -943,7 +971,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} func (*RootPaths) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{32} + return fileDescriptor_a8431b6e0aeeb761, []int{33} } func (m *RootPaths) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -971,7 +999,7 @@ var xxx_messageInfo_RootPaths proto.InternalMessageInfo func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{33} + return fileDescriptor_a8431b6e0aeeb761, []int{34} } func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -999,7 +1027,7 @@ var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{34} + return fileDescriptor_a8431b6e0aeeb761, []int{35} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1027,7 +1055,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} func (*StatusCause) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{35} + return fileDescriptor_a8431b6e0aeeb761, []int{36} } func (m *StatusCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1055,7 +1083,7 @@ var xxx_messageInfo_StatusCause proto.InternalMessageInfo func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} func (*StatusDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{36} + return fileDescriptor_a8431b6e0aeeb761, []int{37} } func (m *StatusDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1083,7 +1111,7 @@ var xxx_messageInfo_StatusDetails proto.InternalMessageInfo func (m *TableOptions) Reset() { *m = TableOptions{} } func (*TableOptions) ProtoMessage() {} func (*TableOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{37} + return fileDescriptor_a8431b6e0aeeb761, []int{38} } func (m *TableOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1111,7 +1139,7 @@ var xxx_messageInfo_TableOptions proto.InternalMessageInfo func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} func (*Time) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{38} + return fileDescriptor_a8431b6e0aeeb761, []int{39} } func (m *Time) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Time.Unmarshal(m, b) @@ -1134,7 +1162,7 @@ var xxx_messageInfo_Time proto.InternalMessageInfo func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{39} + return fileDescriptor_a8431b6e0aeeb761, []int{40} } func (m *Timestamp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1162,7 +1190,7 @@ var 
xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} func (*TypeMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{40} + return fileDescriptor_a8431b6e0aeeb761, []int{41} } func (m *TypeMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1190,7 +1218,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } func (*UpdateOptions) ProtoMessage() {} func (*UpdateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{41} + return fileDescriptor_a8431b6e0aeeb761, []int{42} } func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1218,7 +1246,7 @@ var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} func (*Verbs) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{42} + return fileDescriptor_a8431b6e0aeeb761, []int{43} } func (m *Verbs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1246,7 +1274,7 @@ var xxx_messageInfo_Verbs proto.InternalMessageInfo func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} func (*WatchEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{43} + return fileDescriptor_a8431b6e0aeeb761, []int{44} } func (m *WatchEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1282,6 +1310,7 @@ func init() { proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions") proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*FieldSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement") proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") @@ -1326,186 +1355,187 @@ func init() { } var fileDescriptor_a8431b6e0aeeb761 = []byte{ - // 2853 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47, - 0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44, - 0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d, - 0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a, - 0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88, - 0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85, - 0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa, - 0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0x3c, 0x73, 0x7c, 0x8d, 0xd5, 0x1d, 0x7f, 0xdd, 0xea, 0x3a, 0x1d, - 0xab, 0x75, 0xe4, 0x78, 0x84, 0xf6, 0xd7, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0xeb, 0x1d, 0x12, 0x58, - 0xeb, 0x27, 0x57, 0xd6, 0xdb, 0xc4, 0x23, 0xd4, 0x0a, 0x88, 0x5d, 0xef, 0x52, 0x3f, 0xf0, 0xd1, - 0x63, 0x92, 0xab, 0xae, 0x73, 0xd5, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0x75, 0xce, 0x55, 0x3f, 0xb9, - 0xb2, 0xf2, 0x54, 0xdb, 0x09, 0x8e, 0x7a, 0x07, 0xf5, 0x96, 0xdf, 0x59, 0x6f, 0xfb, 0x6d, 0x7f, - 
0x5d, 0x30, 0x1f, 0xf4, 0x0e, 0xc5, 0x97, 0xf8, 0x10, 0xbf, 0xa4, 0xd0, 0x95, 0xf5, 0x49, 0xa6, - 0xd0, 0x9e, 0x17, 0x38, 0x1d, 0x32, 0x6c, 0xc5, 0xca, 0xb3, 0xe7, 0x31, 0xb0, 0xd6, 0x11, 0xe9, - 0x58, 0xc3, 0x7c, 0xe6, 0x9f, 0xb2, 0x50, 0xd8, 0xd8, 0xdb, 0xbe, 0x49, 0xfd, 0x5e, 0x17, 0xad, - 0x41, 0xce, 0xb3, 0x3a, 0xa4, 0x6a, 0xac, 0x19, 0x97, 0x8b, 0x8d, 0xf2, 0x07, 0x83, 0xda, 0xcc, - 0xe9, 0xa0, 0x96, 0x7b, 0xd5, 0xea, 0x10, 0x2c, 0x30, 0xc8, 0x85, 0xc2, 0x09, 0xa1, 0xcc, 0xf1, - 0x3d, 0x56, 0xcd, 0xac, 0x65, 0x2f, 0x97, 0xae, 0xbe, 0x58, 0x4f, 0xb3, 0xfe, 0xba, 0x50, 0x70, - 0x57, 0xb2, 0x6e, 0xf9, 0xb4, 0xe9, 0xb0, 0x96, 0x7f, 0x42, 0x68, 0xbf, 0xb1, 0xa8, 0xb4, 0x14, - 0x14, 0x92, 0xe1, 0x48, 0x03, 0xfa, 0x91, 0x01, 0x8b, 0x5d, 0x4a, 0x0e, 0x09, 0xa5, 0xc4, 0x56, - 0xf8, 0x6a, 0x76, 0xcd, 0x78, 0x08, 0x6a, 0xab, 0x4a, 0xed, 0xe2, 0xde, 0x90, 0x7c, 0x3c, 0xa2, - 0x11, 0xfd, 0xda, 0x80, 0x15, 0x46, 0xe8, 0x09, 0xa1, 0x1b, 0xb6, 0x4d, 0x09, 0x63, 0x8d, 0xfe, - 0xa6, 0xeb, 0x10, 0x2f, 0xd8, 0xdc, 0x6e, 0x62, 0x56, 0xcd, 0x89, 0x7d, 0xf8, 0x7a, 0x3a, 0x83, - 0xf6, 0x27, 0xc9, 0x69, 0x98, 0xca, 0xa2, 0x95, 0x89, 0x24, 0x0c, 0xdf, 0xc7, 0x0c, 0xf3, 0x10, - 0xca, 0xe1, 0x41, 0xde, 0x72, 0x58, 0x80, 0xee, 0xc2, 0x6c, 0x9b, 0x7f, 0xb0, 0xaa, 0x21, 0x0c, - 0xac, 0xa7, 0x33, 0x30, 0x94, 0xd1, 0x98, 0x57, 0xf6, 0xcc, 0x8a, 0x4f, 0x86, 0x95, 0x34, 0xf3, - 0x67, 0x39, 0x28, 0x6d, 0xec, 0x6d, 0x63, 0xc2, 0xfc, 0x1e, 0x6d, 0x91, 0x14, 0x4e, 0x73, 0x0d, - 0xca, 0xcc, 0xf1, 0xda, 0x3d, 0xd7, 0xa2, 0x1c, 0x5a, 0x9d, 0x15, 0x94, 0xcb, 0x8a, 0xb2, 0xbc, - 0xaf, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xd7, 0x6a, 0x11, 0xbb, 0x9a, 0x59, - 0x33, 0x2e, 0x17, 0x1a, 0x48, 0xf1, 0xc1, 0xab, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x51, 0xc8, 0x0b, - 0x4b, 0xab, 0x05, 0xa1, 0xa6, 0xa2, 0xc8, 0xf3, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x13, 0x30, 0xa7, - 0xbc, 0xac, 0x5a, 0x14, 0x64, 0x0b, 0x8a, 0x6c, 0x2e, 0x74, 0x83, 0x10, 0xcf, 0xd7, 0x77, 0xec, - 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x38, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, 0xfc, 0x09, - 0xa1, 0x07, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0x8d, 0x22, 0x37, - 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x9f, 0x06, 0x62, 0x79, 0xd5, 0xfc, 0x5a, - 0xf6, 0x72, 0xb1, 0x31, 0xcf, 0xd7, 0xbb, 0x1f, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x5b, 0x56, 0x40, - 0xda, 0x3e, 0x75, 0x08, 0xab, 0xce, 0xc5, 0xf4, 0x9b, 0x11, 0x14, 0x6b, 0x14, 0xe8, 0x65, 0x40, - 0x2c, 0xf0, 0xa9, 0xd5, 0x26, 0x6a, 0xa9, 0x2f, 0x59, 0xec, 0xa8, 0x0a, 0x62, 0x75, 0x2b, 0x6a, - 0x75, 0x68, 0x7f, 0x84, 0x02, 0x8f, 0xe1, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef, - 0xae, 0x41, 0xb9, 0xad, 0xdd, 0x3a, 0xe5, 0x17, 0xd1, 0x69, 0xeb, 0x37, 0x12, 0x27, 0x28, 0x11, - 0x81, 0x22, 0x55, 0x92, 0xc2, 0xe8, 0x72, 0x25, 0xb5, 0xd3, 0x86, 0x36, 0xc4, 0x9a, 0x34, 0x20, - 0xc3, 0xb1, 0x64, 0xf3, 0x1f, 0x86, 0x70, 0xe0, 0x30, 0xde, 0xa0, 0xcb, 0x5a, 0x4c, 0x33, 0xc4, - 0xf6, 0x95, 0x27, 0xc4, 0xa3, 0x73, 0x02, 0x41, 0xe6, 0xff, 0x22, 0x10, 0x5c, 0x2f, 0xfc, 0xf2, - 0xbd, 0xda, 0xcc, 0xdb, 0x7f, 0x5b, 0x9b, 0x31, 0x7f, 0x61, 0x40, 0x79, 0xa3, 0xdb, 0x75, 0xfb, - 0xbb, 0xdd, 0x40, 0x2c, 0xc0, 0x84, 0x59, 0x9b, 0xf6, 0x71, 0xcf, 0x53, 0x0b, 0x05, 0x7e, 0xbf, - 0x9b, 0x02, 0x82, 0x15, 0x86, 0xdf, 0x9f, 0x43, 0x9f, 0xb6, 0x88, 0xba, 0x6e, 0xd1, 0xfd, 0xd9, - 0xe2, 0x40, 0x2c, 0x71, 0xfc, 0x90, 0x0f, 0x1d, 0xe2, 0xda, 0x3b, 0x96, 0x67, 0xb5, 0x09, 0x55, - 0x97, 0x23, 0xda, 0xfa, 0x2d, 0x0d, 0x87, 0x13, 0x94, 0xe6, 0x7f, 0x32, 0x50, 0xdc, 0xf4, 0x3d, - 0xdb, 0x09, 0xd4, 0xe5, 
0x0a, 0xfa, 0xdd, 0x91, 0xe0, 0x71, 0xbb, 0xdf, 0x25, 0x58, 0x60, 0xd0, - 0x73, 0x30, 0xcb, 0x02, 0x2b, 0xe8, 0x31, 0x61, 0x4f, 0xb1, 0xf1, 0x48, 0x18, 0x96, 0xf6, 0x05, - 0xf4, 0x6c, 0x50, 0x5b, 0x88, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x03, 0xb1, 0x51, - 0xf6, 0x4d, 0xf9, 0xec, 0x85, 0xef, 0x47, 0x36, 0xf6, 0xf4, 0xdd, 0x11, 0x0a, 0x3c, 0x86, 0x0b, - 0x9d, 0x00, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x87, 0xa8, 0x0b, - 0xff, 0xa5, 0x74, 0x27, 0xce, 0x39, 0x62, 0xbd, 0xb7, 0x46, 0xa4, 0xe1, 0x31, 0x1a, 0xd0, 0xe3, - 0x30, 0x4b, 0x89, 0xc5, 0x7c, 0xaf, 0x9a, 0x17, 0xcb, 0x8f, 0xa2, 0x32, 0x16, 0x50, 0xac, 0xb0, - 0x3c, 0xa0, 0x75, 0x08, 0x63, 0x56, 0x3b, 0x0c, 0xaf, 0x51, 0x40, 0xdb, 0x91, 0x60, 0x1c, 0xe2, - 0xcd, 0xdf, 0x1a, 0x50, 0xd9, 0xa4, 0xc4, 0x0a, 0xc8, 0x34, 0x6e, 0xf1, 0xa9, 0x4f, 0x1c, 0x6d, - 0xc0, 0x82, 0xf8, 0xbe, 0x6b, 0xb9, 0x8e, 0x2d, 0xcf, 0x20, 0x27, 0x98, 0x3f, 0xaf, 0x98, 0x17, - 0xb6, 0x92, 0x68, 0x3c, 0x4c, 0x6f, 0xfe, 0x24, 0x0b, 0x95, 0x26, 0x71, 0x49, 0x6c, 0xf2, 0x16, - 0xa0, 0x36, 0xb5, 0x5a, 0x64, 0x8f, 0x50, 0xc7, 0xb7, 0xf7, 0x49, 0xcb, 0xf7, 0x6c, 0x26, 0xdc, - 0x28, 0xdb, 0xf8, 0x1c, 0xdf, 0xdf, 0x9b, 0x23, 0x58, 0x3c, 0x86, 0x03, 0xb9, 0x50, 0xe9, 0x52, - 0xf1, 0x5b, 0xec, 0xb9, 0xf4, 0xb2, 0xd2, 0xd5, 0xaf, 0xa4, 0x3b, 0xd2, 0x3d, 0x9d, 0xb5, 0xb1, - 0x74, 0x3a, 0xa8, 0x55, 0x12, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x8b, 0x3e, 0xed, 0x1e, 0x59, - 0x5e, 0x93, 0x74, 0x89, 0x67, 0x13, 0x2f, 0x60, 0x62, 0x23, 0x0b, 0x8d, 0x65, 0x9e, 0x8b, 0xec, - 0x0e, 0xe1, 0xf0, 0x08, 0x35, 0x7a, 0x0d, 0x96, 0xba, 0xd4, 0xef, 0x5a, 0x6d, 0xb1, 0x31, 0x7b, - 0xbe, 0xeb, 0xb4, 0xfa, 0x6a, 0x3b, 0x9f, 0x3c, 0x1d, 0xd4, 0x96, 0xf6, 0x86, 0x91, 0x67, 0x83, - 0xda, 0x05, 0xb1, 0x75, 0x1c, 0x12, 0x23, 0xf1, 0xa8, 0x18, 0xcd, 0x0d, 0xf2, 0x93, 0xdc, 0xc0, - 0xdc, 0x86, 0x42, 0xb3, 0xa7, 0xee, 0xc4, 0x0b, 0x50, 0xb0, 0xd5, 0x6f, 0xb5, 0xf3, 0xe1, 0xe5, - 0x8c, 0x68, 0xce, 0x06, 0xb5, 0x0a, 0x4f, 0x3f, 0xeb, 0x21, 0x00, 0x47, 0x2c, 0xe6, 0xe3, 0x50, - 0x10, 0x07, 0xcf, 0xee, 0x5e, 0x41, 0x8b, 0x90, 0xc5, 0xd6, 0x3d, 0x21, 0xa5, 0x8c, 0xf9, 0x4f, - 0x2d, 0x8a, 0xed, 0x02, 0xdc, 0x24, 0x41, 0x78, 0xf0, 0x1b, 0xb0, 0x10, 0x86, 0xf2, 0xe4, 0x0b, - 0x13, 0x79, 0x13, 0x4e, 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x3a, 0x14, 0xc5, 0x2b, 0xc4, 0x9f, 0xf0, - 0x38, 0x5d, 0x30, 0xee, 0x93, 0x2e, 0x84, 0x39, 0x40, 0x66, 0x52, 0x0e, 0xa0, 0x99, 0xeb, 0x42, - 0x45, 0xf2, 0x86, 0x09, 0x52, 0x2a, 0x0d, 0x4f, 0x42, 0x21, 0x34, 0x53, 0x69, 0x89, 0x12, 0xe3, - 0x50, 0x10, 0x8e, 0x28, 0x34, 0x6d, 0x47, 0x90, 0x78, 0x51, 0xd3, 0x29, 0xd3, 0xb2, 0x9f, 0xcc, - 0xfd, 0xb3, 0x1f, 0x4d, 0xd3, 0x0f, 0xa1, 0x3a, 0x29, 0x9b, 0x7e, 0x80, 0x37, 0x3f, 0xbd, 0x29, - 0xe6, 0x3b, 0x06, 0x2c, 0xea, 0x92, 0xd2, 0x1f, 0x5f, 0x7a, 0x25, 0xe7, 0x67, 0x7b, 0xda, 0x8e, - 0xfc, 0xca, 0x80, 0xe5, 0xc4, 0xd2, 0xa6, 0x3a, 0xf1, 0x29, 0x8c, 0xd2, 0x9d, 0x23, 0x3b, 0x85, - 0x73, 0xfc, 0x25, 0x03, 0x95, 0x5b, 0xd6, 0x01, 0x71, 0xf7, 0x89, 0x4b, 0x5a, 0x81, 0x4f, 0xd1, - 0x0f, 0xa0, 0xd4, 0xb1, 0x82, 0xd6, 0x91, 0x80, 0x86, 0x95, 0x41, 0x33, 0x5d, 0xb0, 0x4b, 0x48, - 0xaa, 0xef, 0xc4, 0x62, 0x6e, 0x78, 0x01, 0xed, 0x37, 0x2e, 0x28, 0x93, 0x4a, 0x1a, 0x06, 0xeb, - 0xda, 0x44, 0x39, 0x27, 0xbe, 0x6f, 0xbc, 0xd5, 0xe5, 0x69, 0xcb, 0xf4, 0x55, 0x64, 0xc2, 0x04, - 0x4c, 0xde, 0xec, 0x39, 0x94, 0x74, 0x88, 0x17, 0xc4, 0xe5, 0xdc, 0xce, 0x90, 0x7c, 0x3c, 0xa2, - 0x71, 0xe5, 0x45, 0x58, 0x1c, 0x36, 0x9e, 0xc7, 0x9f, 0x63, 0xd2, 0x97, 0xe7, 0x85, 0xf9, 0x4f, - 0xb4, 0x0c, 0xf9, 0x13, 0xcb, 0xed, 0xa9, 0xdb, 
0x88, 0xe5, 0xc7, 0xf5, 0xcc, 0x35, 0xc3, 0xfc, - 0x8d, 0x01, 0xd5, 0x49, 0x86, 0xa0, 0x2f, 0x6a, 0x82, 0x1a, 0x25, 0x65, 0x55, 0xf6, 0x15, 0xd2, - 0x97, 0x52, 0x6f, 0x40, 0xc1, 0xef, 0xf2, 0x9c, 0xc2, 0xa7, 0xea, 0xd4, 0x9f, 0x08, 0x4f, 0x72, - 0x57, 0xc1, 0xcf, 0x06, 0xb5, 0x8b, 0x09, 0xf1, 0x21, 0x02, 0x47, 0xac, 0x3c, 0x52, 0x0b, 0x7b, - 0xf8, 0xeb, 0x11, 0x45, 0xea, 0xbb, 0x02, 0x82, 0x15, 0xc6, 0xfc, 0xbd, 0x01, 0x39, 0x91, 0x90, - 0xbf, 0x0e, 0x05, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0xa5, 0x20, 0xe7, 0xde, 0x21, - 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xef, 0x04, 0xa4, 0x13, 0x1e, 0xe4, - 0x53, 0x13, 0x45, 0xab, 0x46, 0x44, 0x1d, 0x5b, 0xf7, 0x6e, 0xbc, 0x15, 0x10, 0x8f, 0x1f, 0x46, - 0x7c, 0x35, 0xb6, 0xb9, 0x0c, 0x2c, 0x45, 0x99, 0xff, 0x32, 0x20, 0x52, 0xc5, 0x9d, 0x9f, 0x11, - 0xf7, 0xf0, 0x96, 0xe3, 0x1d, 0xab, 0x6d, 0x8d, 0xcc, 0xd9, 0x57, 0x70, 0x1c, 0x51, 0x8c, 0x7b, - 0x1e, 0x32, 0xd3, 0x3d, 0x0f, 0x5c, 0x61, 0xcb, 0xf7, 0x02, 0xc7, 0xeb, 0x8d, 0xdc, 0xb6, 0x4d, - 0x05, 0xc7, 0x11, 0x05, 0x4f, 0x44, 0x28, 0xe9, 0x58, 0x8e, 0xe7, 0x78, 0x6d, 0xbe, 0x88, 0x4d, - 0xbf, 0xe7, 0x05, 0xe2, 0x45, 0x56, 0x89, 0x08, 0x1e, 0xc1, 0xe2, 0x31, 0x1c, 0xe6, 0xbf, 0x73, - 0x50, 0xe2, 0x6b, 0x0e, 0xdf, 0xb9, 0xe7, 0xa1, 0xe2, 0xea, 0x5e, 0xa0, 0xd6, 0x7e, 0x51, 0x99, - 0x92, 0xbc, 0xd7, 0x38, 0x49, 0xcb, 0x99, 0x45, 0x0a, 0x15, 0x31, 0x67, 0x92, 0xcc, 0x5b, 0x3a, - 0x12, 0x27, 0x69, 0x79, 0xf4, 0xba, 0xc7, 0xef, 0x87, 0xca, 0x4c, 0xa2, 0x23, 0xfa, 0x26, 0x07, - 0x62, 0x89, 0x43, 0x3b, 0x70, 0xc1, 0x72, 0x5d, 0xff, 0x9e, 0x00, 0x36, 0x7c, 0xff, 0xb8, 0x63, - 0xd1, 0x63, 0x26, 0x8a, 0xe9, 0x42, 0xe3, 0x0b, 0x8a, 0xe5, 0xc2, 0xc6, 0x28, 0x09, 0x1e, 0xc7, - 0x37, 0xee, 0xd8, 0x72, 0x53, 0x1e, 0xdb, 0x11, 0x2c, 0x0f, 0x81, 0xc4, 0x2d, 0x57, 0x95, 0xed, - 0x33, 0x4a, 0xce, 0x32, 0x1e, 0x43, 0x73, 0x36, 0x01, 0x8e, 0xc7, 0x4a, 0x44, 0xd7, 0x61, 0x9e, - 0x7b, 0xb2, 0xdf, 0x0b, 0xc2, 0xbc, 0x33, 0x2f, 0x8e, 0x1b, 0x9d, 0x0e, 0x6a, 0xf3, 0xb7, 0x13, - 0x18, 0x3c, 0x44, 0xc9, 0x37, 0xd7, 0x75, 0x3a, 0x4e, 0x50, 0x9d, 0x13, 0x2c, 0xd1, 0xe6, 0xde, - 0xe2, 0x40, 0x2c, 0x71, 0x09, 0x0f, 0x2c, 0x9c, 0xeb, 0x81, 0x9b, 0xb0, 0xc4, 0x88, 0x67, 0x6f, - 0x7b, 0x4e, 0xe0, 0x58, 0xee, 0x8d, 0x13, 0x91, 0x55, 0x96, 0xc4, 0x41, 0x5c, 0xe4, 0x29, 0xe1, - 0xfe, 0x30, 0x12, 0x8f, 0xd2, 0x9b, 0x7f, 0xce, 0x02, 0x92, 0x09, 0xbb, 0x2d, 0x93, 0x32, 0x19, - 0x17, 0x79, 0x59, 0xa1, 0x12, 0x7e, 0x63, 0xa8, 0xac, 0x50, 0xb9, 0x7e, 0x88, 0x47, 0x3b, 0x50, - 0x94, 0xf1, 0x29, 0xbe, 0x73, 0xeb, 0x8a, 0xb8, 0xb8, 0x1b, 0x22, 0xce, 0x06, 0xb5, 0x95, 0x84, - 0x9a, 0x08, 0x23, 0x4a, 0xbe, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1d, 0xbd, 0xe9, 0x57, 0x8c, - 0x5b, 0x3f, 0x71, 0xf9, 0x8e, 0x35, 0x2a, 0xf4, 0x12, 0xe4, 0x82, 0x4f, 0x57, 0x96, 0x15, 0x44, - 0xd5, 0xc9, 0x8b, 0x30, 0x21, 0x81, 0x6b, 0x17, 0x97, 0x82, 0x71, 0xb3, 0x54, 0x45, 0x15, 0x69, - 0xdf, 0x8a, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xc2, 0xa1, 0xca, 0x67, 0xc5, 0xe9, 0xa6, 0x8e, - 0xb3, 0x61, 0x16, 0x2c, 0xfb, 0x0e, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x89, 0xf5, 0x0e, - 0xa2, 0x14, 0x40, 0xba, 0x44, 0xf4, 0xde, 0xee, 0xc7, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x42, 0x71, - 0xc7, 0x69, 0x51, 0x5f, 0x14, 0x92, 0x4f, 0xc0, 0x1c, 0x4b, 0x54, 0x49, 0xd1, 0x49, 0x86, 0xae, - 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x5a, 0x28, 0x1f, 0xfb, 0xe8, 0xab, 0x1c, 0x88, - 0x25, 0xee, 0xfa, 0x32, 0xcf, 0x32, 0x7e, 0xfa, 0x7e, 0x6d, 0xe6, 0xdd, 0xf7, 0x6b, 0x33, 0xef, - 0xbd, 0xaf, 0x32, 0x8e, 0x3f, 0x00, 0xc0, 0xee, 0xc1, 0xf7, 0x48, 0x4b, 
0xc6, 0xee, 0x54, 0xbd, - 0xc1, 0xb0, 0x25, 0x2d, 0x7a, 0x83, 0x99, 0xa1, 0xcc, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, 0x87, - 0x62, 0xd4, 0xf5, 0x53, 0xfe, 0xb1, 0x14, 0xfa, 0x5b, 0xd4, 0x1a, 0xc4, 0x31, 0x4d, 0xe2, 0x21, - 0xc9, 0x9d, 0xfb, 0x90, 0x34, 0x20, 0xdb, 0x73, 0x6c, 0x55, 0x75, 0x3f, 0x1d, 0x3e, 0xe4, 0x77, - 0xb6, 0x9b, 0x67, 0x83, 0xda, 0x23, 0x93, 0x9a, 0xed, 0x41, 0xbf, 0x4b, 0x58, 0xfd, 0xce, 0x76, - 0x13, 0x73, 0xe6, 0x71, 0x51, 0x6d, 0x76, 0xca, 0xa8, 0x76, 0x15, 0xa0, 0x1d, 0xf7, 0x2e, 0x64, - 0xd0, 0x88, 0x1c, 0x51, 0xeb, 0x59, 0x68, 0x54, 0x88, 0xc1, 0x52, 0x8b, 0xd7, 0xf7, 0xaa, 0x87, - 0xc0, 0x02, 0xab, 0x23, 0xbb, 0xa1, 0xd3, 0xdd, 0x89, 0x4b, 0x4a, 0xcd, 0xd2, 0xe6, 0xb0, 0x30, - 0x3c, 0x2a, 0x1f, 0xf9, 0xb0, 0x64, 0xab, 0x32, 0x33, 0x56, 0x5a, 0x9c, 0x5a, 0xa9, 0x88, 0x58, - 0xcd, 0x61, 0x41, 0x78, 0x54, 0x36, 0xfa, 0x2e, 0xac, 0x84, 0xc0, 0xd1, 0x5a, 0x5f, 0x44, 0xfd, - 0x6c, 0x63, 0xf5, 0x74, 0x50, 0x5b, 0x69, 0x4e, 0xa4, 0xc2, 0xf7, 0x91, 0x80, 0x6c, 0x98, 0x75, - 0x65, 0x96, 0x5c, 0x12, 0x99, 0xcd, 0xd7, 0xd2, 0xad, 0x22, 0xf6, 0xfe, 0xba, 0x9e, 0x1d, 0x47, - 0x7d, 0x1b, 0x95, 0x18, 0x2b, 0xd9, 0xe8, 0x2d, 0x28, 0x59, 0x9e, 0xe7, 0x07, 0x96, 0xec, 0x3e, - 0x94, 0x85, 0xaa, 0x8d, 0xa9, 0x55, 0x6d, 0xc4, 0x32, 0x86, 0xb2, 0x71, 0x0d, 0x83, 0x75, 0x55, - 0xe8, 0x1e, 0x2c, 0xf8, 0xf7, 0x3c, 0x42, 0x31, 0x39, 0x24, 0x94, 0x78, 0x2d, 0xc2, 0xaa, 0x15, - 0xa1, 0xfd, 0x99, 0x94, 0xda, 0x13, 0xcc, 0xb1, 0x4b, 0x27, 0xe1, 0x0c, 0x0f, 0x6b, 0x41, 0x75, - 0x1e, 0x5b, 0x3d, 0xcb, 0x75, 0xbe, 0x4f, 0x28, 0xab, 0xce, 0xc7, 0x0d, 0xeb, 0xad, 0x08, 0x8a, - 0x35, 0x0a, 0xd4, 0x83, 0x4a, 0x47, 0x7f, 0x32, 0xaa, 0x4b, 0xc2, 0xcc, 0x6b, 0xe9, 0xcc, 0x1c, - 0x7d, 0xd4, 0xe2, 0x34, 0x28, 0x81, 0xc3, 0x49, 0x2d, 0x2b, 0xcf, 0x41, 0xe9, 0x53, 0x56, 0x08, - 0xbc, 0xc2, 0x18, 0x3e, 0x90, 0xa9, 0x2a, 0x8c, 0x3f, 0x66, 0x60, 0x3e, 0xb9, 0x8d, 0x43, 0xcf, - 0x61, 0x3e, 0xd5, 0x73, 0x18, 0xd6, 0xb2, 0xc6, 0xc4, 0xc9, 0x45, 0x18, 0x9f, 0xb3, 0x13, 0xe3, - 0xb3, 0x0a, 0x83, 0xb9, 0x07, 0x09, 0x83, 0x75, 0x00, 0x9e, 0xac, 0x50, 0xdf, 0x75, 0x09, 0x15, - 0x11, 0xb0, 0xa0, 0x26, 0x14, 0x11, 0x14, 0x6b, 0x14, 0x3c, 0xa5, 0x3e, 0x70, 0xfd, 0xd6, 0xb1, - 0xd8, 0x82, 0xf0, 0xf6, 0x8a, 0xd8, 0x57, 0x90, 0x29, 0x75, 0x63, 0x04, 0x8b, 0xc7, 0x70, 0x98, - 0x7d, 0xb8, 0xb8, 0x67, 0x51, 0x9e, 0xe4, 0xc4, 0x37, 0x45, 0xd4, 0x2c, 0x6f, 0x8c, 0x54, 0x44, - 0x4f, 0x4f, 0x7b, 0xe3, 0xe2, 0xcd, 0x8f, 0x61, 0x71, 0x55, 0x64, 0xfe, 0xd5, 0x80, 0x4b, 0x63, - 0x75, 0x7f, 0x06, 0x15, 0xd9, 0x1b, 0xc9, 0x8a, 0xec, 0xf9, 0x94, 0xad, 0xcc, 0x71, 0xd6, 0x4e, - 0xa8, 0xcf, 0xe6, 0x20, 0xbf, 0xc7, 0x33, 0x61, 0xf3, 0x43, 0x03, 0xca, 0xe2, 0xd7, 0x34, 0x9d, - 0xe4, 0x5a, 0x72, 0xc0, 0x50, 0x7c, 0x78, 0xc3, 0x85, 0x87, 0xd1, 0x6a, 0x7e, 0xc7, 0x80, 0x64, - 0x0f, 0x17, 0xbd, 0x28, 0xaf, 0x80, 0x11, 0x35, 0x59, 0xa7, 0x74, 0xff, 0x17, 0x26, 0x95, 0xa4, - 0x17, 0x52, 0x75, 0x2b, 0x9f, 0x84, 0x22, 0xf6, 0xfd, 0x60, 0xcf, 0x0a, 0x8e, 0x18, 0xdf, 0xbb, - 0x2e, 0xff, 0xa1, 0xb6, 0x57, 0xec, 0x9d, 0xc0, 0x60, 0x09, 0x37, 0x7f, 0x6e, 0xc0, 0xa5, 0x89, - 0x73, 0x23, 0x1e, 0x45, 0x5a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, 0x78, - 0x2d, 0x99, 0x18, 0x36, 0x0d, 0xd7, 0x92, 0x09, 0x6d, 0x38, 0x49, 0x6b, 0xfe, 0x33, 0x03, 0x6a, - 0x50, 0xf3, 0x3f, 0x76, 0xfa, 0xc7, 0x87, 0xc6, 0x44, 0xf3, 0xc9, 0x31, 0x51, 0x34, 0x13, 0xd2, - 0xe6, 0x24, 0xd9, 0xfb, 0xcf, 0x49, 0xd0, 0xb3, 0xd1, 0xe8, 0x45, 0xfa, 0xd0, 0x6a, 0x72, 0xf4, - 0x72, 0x36, 0xa8, 0x95, 0x95, 0xf0, 0xe4, 0x28, 0xe6, 0x35, 0x98, 0xb3, 0x49, 0x60, 0x39, 0xae, - 
0xac, 0x0b, 0x53, 0x0f, 0x13, 0xa4, 0xb0, 0xa6, 0x64, 0x6d, 0x94, 0xb8, 0x4d, 0xea, 0x03, 0x87, - 0x02, 0x79, 0xc0, 0x6e, 0xf9, 0xb6, 0xac, 0x48, 0xf2, 0x71, 0xc0, 0xde, 0xf4, 0x6d, 0x82, 0x05, - 0xc6, 0x7c, 0xd7, 0x80, 0x92, 0x94, 0xb4, 0x69, 0xf5, 0x18, 0x41, 0x57, 0xa2, 0x55, 0xc8, 0xe3, - 0xbe, 0xa4, 0xcf, 0xd8, 0xce, 0x06, 0xb5, 0xa2, 0x20, 0x13, 0xc5, 0xcc, 0x98, 0x59, 0x52, 0xe6, - 0x9c, 0x3d, 0x7a, 0x14, 0xf2, 0xe2, 0x02, 0xa9, 0xcd, 0x8c, 0x87, 0x85, 0x1c, 0x88, 0x25, 0xce, - 0xfc, 0x38, 0x03, 0x95, 0xc4, 0xe2, 0x52, 0xd4, 0x05, 0x51, 0x0b, 0x35, 0x93, 0xa2, 0x2d, 0x3f, - 0x79, 0x34, 0xaf, 0x9e, 0xaf, 0xd9, 0x07, 0x79, 0xbe, 0xbe, 0x0d, 0xb3, 0x2d, 0xbe, 0x47, 0xe1, - 0x3f, 0x3d, 0xae, 0x4c, 0x73, 0x9c, 0x62, 0x77, 0x63, 0x6f, 0x14, 0x9f, 0x0c, 0x2b, 0x81, 0xe8, - 0x26, 0x2c, 0x51, 0x12, 0xd0, 0xfe, 0xc6, 0x61, 0x40, 0xa8, 0xde, 0x4c, 0xc8, 0xc7, 0xd9, 0x37, - 0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0xe6, 0x01, 0x94, 0x6f, 0x5b, 0x07, 0x6e, 0x34, 0x1e, 0xc3, 0x50, - 0x71, 0xbc, 0x96, 0xdb, 0xb3, 0x89, 0x0c, 0xe8, 0x61, 0xf4, 0x0a, 0x2f, 0xed, 0xb6, 0x8e, 0x3c, - 0x1b, 0xd4, 0x2e, 0x24, 0x00, 0x72, 0x1e, 0x84, 0x93, 0x22, 0x4c, 0x17, 0x72, 0x9f, 0x61, 0x25, - 0xf9, 0x1d, 0x28, 0xc6, 0xb9, 0xfe, 0x43, 0x56, 0x69, 0xbe, 0x01, 0x05, 0xee, 0xf1, 0x61, 0x8d, - 0x7a, 0x4e, 0x96, 0x94, 0xcc, 0xbd, 0x32, 0x69, 0x72, 0x2f, 0x31, 0x64, 0xbd, 0xd3, 0xb5, 0x1f, - 0x70, 0xc8, 0x9a, 0x79, 0x90, 0x97, 0x2f, 0x3b, 0xe5, 0xcb, 0x77, 0x15, 0xe4, 0x1f, 0x51, 0xf8, - 0x23, 0x23, 0x13, 0x08, 0xed, 0x91, 0xd1, 0xdf, 0x7f, 0x6d, 0xc2, 0xf0, 0x63, 0x03, 0x40, 0xb4, - 0xf2, 0x44, 0x1b, 0x29, 0xc5, 0x38, 0xff, 0x0e, 0xcc, 0xfa, 0xd2, 0x23, 0xe5, 0xa0, 0x75, 0xca, - 0x7e, 0x71, 0x74, 0x91, 0xa4, 0x4f, 0x62, 0x25, 0xac, 0xf1, 0xf2, 0x07, 0x9f, 0xac, 0xce, 0x7c, - 0xf8, 0xc9, 0xea, 0xcc, 0x47, 0x9f, 0xac, 0xce, 0xbc, 0x7d, 0xba, 0x6a, 0x7c, 0x70, 0xba, 0x6a, - 0x7c, 0x78, 0xba, 0x6a, 0x7c, 0x74, 0xba, 0x6a, 0x7c, 0x7c, 0xba, 0x6a, 0xbc, 0xfb, 0xf7, 0xd5, - 0x99, 0xd7, 0x1e, 0x4b, 0xf3, 0x07, 0xbf, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x82, 0xff, - 0xd4, 0x07, 0x28, 0x00, 0x00, + // 2873 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x5d, 0x6f, 0x23, 0x57, + 0x35, 0x63, 0xc7, 0x89, 0x7d, 0x6c, 0xe7, 0xe3, 0x6e, 0x16, 0xbc, 0x41, 0xc4, 0xe9, 0xb4, 0xaa, + 0xb6, 0xd0, 0x3a, 0xdd, 0xa5, 0x54, 0xdb, 0x2d, 0x2d, 0xc4, 0xf1, 0x66, 0x9b, 0x76, 0xd3, 0x44, + 0x37, 0xbb, 0x0b, 0x94, 0x0a, 0x75, 0xe2, 0xb9, 0x71, 0x86, 0x8c, 0x67, 0xdc, 0x7b, 0xc7, 0x49, + 0x0d, 0x0f, 0xf4, 0x01, 0x04, 0x48, 0xa8, 0x2a, 0x6f, 0x3c, 0xa1, 0x56, 0xf0, 0x03, 0x10, 0x4f, + 0xbc, 0x83, 0x44, 0x1f, 0x8b, 0x78, 0xa9, 0x04, 0xb2, 0xba, 0xe1, 0x81, 0x47, 0xc4, 0x6b, 0x84, + 0x04, 0xba, 0x1f, 0x33, 0x73, 0xc7, 0x1f, 0x9b, 0xf1, 0xee, 0x52, 0xf1, 0xe6, 0x39, 0xdf, 0xf7, + 0xde, 0x73, 0xce, 0x3d, 0xe7, 0x5c, 0xc3, 0x73, 0x47, 0xd7, 0x58, 0xcd, 0xf1, 0xd7, 0xac, 0x8e, + 0xd3, 0xb6, 0x9a, 0x87, 0x8e, 0x47, 0x68, 0x6f, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0xb6, 0xd6, 0x26, + 0x81, 0xb5, 0x76, 0x7c, 0x65, 0xad, 0x45, 0x3c, 0x42, 0xad, 0x80, 0xd8, 0xb5, 0x0e, 0xf5, 0x03, + 0x1f, 0x3d, 0x21, 0xb9, 0x6a, 0x3a, 0x57, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0x56, 0xe3, 0x5c, 0xb5, + 0xe3, 0x2b, 0xcb, 0xcf, 0xb4, 0x9c, 0xe0, 0xb0, 0xbb, 0x5f, 0x6b, 0xfa, 0xed, 0xb5, 0x96, 0xdf, + 0xf2, 0xd7, 0x04, 0xf3, 0x7e, 0xf7, 0x40, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x5e, 0x1b, + 0x67, 0x0a, 0xed, 0x7a, 0x81, 0xd3, 0x26, 0x83, 0x56, 0x2c, 0x3f, 0x7f, 0x1e, 0x03, 0x6b, 0x1e, + 0x92, 0xb6, 0x35, 0xc8, 0x67, 0xfe, 0x29, 
0x0b, 0xf9, 0xf5, 0xdd, 0xad, 0x9b, 0xd4, 0xef, 0x76, + 0xd0, 0x2a, 0x4c, 0x7b, 0x56, 0x9b, 0x54, 0x8c, 0x55, 0xe3, 0x72, 0xa1, 0x5e, 0xfa, 0xa8, 0x5f, + 0x9d, 0x3a, 0xed, 0x57, 0xa7, 0x5f, 0xb7, 0xda, 0x04, 0x0b, 0x0c, 0x72, 0x21, 0x7f, 0x4c, 0x28, + 0x73, 0x7c, 0x8f, 0x55, 0x32, 0xab, 0xd9, 0xcb, 0xc5, 0xab, 0x2f, 0xd7, 0xd2, 0xac, 0xbf, 0x26, + 0x14, 0xdc, 0x95, 0xac, 0x9b, 0x3e, 0x6d, 0x38, 0xac, 0xe9, 0x1f, 0x13, 0xda, 0xab, 0x2f, 0x28, + 0x2d, 0x79, 0x85, 0x64, 0x38, 0xd2, 0x80, 0x7e, 0x64, 0xc0, 0x42, 0x87, 0x92, 0x03, 0x42, 0x29, + 0xb1, 0x15, 0xbe, 0x92, 0x5d, 0x35, 0x1e, 0x81, 0xda, 0x8a, 0x52, 0xbb, 0xb0, 0x3b, 0x20, 0x1f, + 0x0f, 0x69, 0x44, 0xbf, 0x36, 0x60, 0x99, 0x11, 0x7a, 0x4c, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58, + 0xbd, 0xb7, 0xe1, 0x3a, 0xc4, 0x0b, 0x36, 0xb6, 0x1a, 0x98, 0x55, 0xa6, 0xc5, 0x3e, 0x7c, 0x3d, + 0x9d, 0x41, 0x7b, 0xe3, 0xe4, 0xd4, 0x4d, 0x65, 0xd1, 0xf2, 0x58, 0x12, 0x86, 0xef, 0x63, 0x86, + 0x79, 0x00, 0xa5, 0xf0, 0x20, 0x6f, 0x39, 0x2c, 0x40, 0x77, 0x61, 0xa6, 0xc5, 0x3f, 0x58, 0xc5, + 0x10, 0x06, 0xd6, 0xd2, 0x19, 0x18, 0xca, 0xa8, 0xcf, 0x29, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a, + 0x9a, 0xf9, 0xb3, 0x69, 0x28, 0xae, 0xef, 0x6e, 0x61, 0xc2, 0xfc, 0x2e, 0x6d, 0x92, 0x14, 0x4e, + 0x73, 0x0d, 0x4a, 0xcc, 0xf1, 0x5a, 0x5d, 0xd7, 0xa2, 0x1c, 0x5a, 0x99, 0x11, 0x94, 0x4b, 0x8a, + 0xb2, 0xb4, 0xa7, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xc7, 0x6a, 0x12, 0xbb, + 0x92, 0x59, 0x35, 0x2e, 0xe7, 0xeb, 0x48, 0xf1, 0xc1, 0xeb, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x71, + 0xc8, 0x09, 0x4b, 0x2b, 0x79, 0xa1, 0xa6, 0xac, 0xc8, 0x73, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x53, + 0x30, 0xab, 0xbc, 0xac, 0x52, 0x10, 0x64, 0xf3, 0x8a, 0x6c, 0x36, 0x74, 0x83, 0x10, 0xcf, 0xd7, + 0x77, 0xe4, 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x39, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, + 0xdc, 0x31, 0xa1, 0xfb, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0xf5, + 0x02, 0x37, 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x01, 0xb0, 0x43, 0x9f, 0x06, 0x62, 0x79, 0x95, + 0xdc, 0x6a, 0xf6, 0x72, 0xa1, 0x3e, 0xc7, 0xd7, 0xbb, 0x17, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x9b, + 0x56, 0x40, 0x5a, 0x3e, 0x75, 0x08, 0xab, 0xcc, 0xc6, 0xf4, 0x1b, 0x11, 0x14, 0x6b, 0x14, 0xe8, + 0x55, 0x40, 0x2c, 0xf0, 0xa9, 0xd5, 0x22, 0x6a, 0xa9, 0xaf, 0x58, 0xec, 0xb0, 0x02, 0x62, 0x75, + 0xcb, 0x6a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0xbc, 0xe6, 0x0b, + 0xc2, 0xef, 0xae, 0x41, 0xa9, 0xa5, 0x45, 0x9d, 0xf2, 0x8b, 0xe8, 0xb4, 0xf5, 0x88, 0xc4, 0x09, + 0x4a, 0x44, 0xa0, 0x40, 0x95, 0xa4, 0x30, 0xbb, 0x5c, 0x49, 0xed, 0xb4, 0xa1, 0x0d, 0xb1, 0x26, + 0x0d, 0xc8, 0x70, 0x2c, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0xcc, 0x37, 0xe8, 0xb2, 0x96, 0xd3, + 0x0c, 0xb1, 0x7d, 0xa5, 0x31, 0xf9, 0xe8, 0x9c, 0x44, 0x90, 0xf9, 0xbf, 0x48, 0x04, 0xd7, 0xf3, + 0xbf, 0xfc, 0xa0, 0x3a, 0xf5, 0xee, 0xdf, 0x56, 0xa7, 0xcc, 0x5f, 0x18, 0x50, 0x5a, 0xef, 0x74, + 0xdc, 0xde, 0x4e, 0x27, 0x10, 0x0b, 0x30, 0x61, 0xc6, 0xa6, 0x3d, 0xdc, 0xf5, 0xd4, 0x42, 0x81, + 0xc7, 0x77, 0x43, 0x40, 0xb0, 0xc2, 0xf0, 0xf8, 0x39, 0xf0, 0x69, 0x93, 0xa8, 0x70, 0x8b, 0xe2, + 0x67, 0x93, 0x03, 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x70, 0x88, 0x6b, 0x6f, 0x5b, 0x9e, 0xd5, 0x22, + 0x54, 0x05, 0x47, 0xb4, 0xf5, 0x9b, 0x1a, 0x0e, 0x27, 0x28, 0xcd, 0xff, 0x64, 0xa0, 0xb0, 0xe1, + 0x7b, 0xb6, 0x13, 0xa8, 0xe0, 0x0a, 0x7a, 0x9d, 0xa1, 0xe4, 0x71, 0xbb, 0xd7, 0x21, 0x58, 0x60, + 0xd0, 0x0b, 0x30, 0xc3, 0x02, 0x2b, 0xe8, 0x32, 0x61, 0x4f, 0xa1, 0xfe, 0x58, 0x98, 0x96, 0xf6, + 0x04, 0xf4, 0xac, 0x5f, 0x9d, 0x8f, 0xc4, 0x49, 0x10, 0x56, 0x0c, 
0xdc, 0xd3, 0xfd, 0x7d, 0xb1, + 0x51, 0xf6, 0x4d, 0x79, 0xed, 0x85, 0xf7, 0x47, 0x36, 0xf6, 0xf4, 0x9d, 0x21, 0x0a, 0x3c, 0x82, + 0x0b, 0x1d, 0x03, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x9b, 0xa8, + 0x80, 0xff, 0x52, 0xba, 0x13, 0xe7, 0x1c, 0xb1, 0xde, 0x5b, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8, + 0x49, 0x98, 0xa1, 0xc4, 0x62, 0xbe, 0x57, 0xc9, 0x89, 0xe5, 0x47, 0x59, 0x19, 0x0b, 0x28, 0x56, + 0x58, 0x9e, 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0xa6, 0xd7, 0x28, 0xa1, 0x6d, 0x4b, 0x30, 0x0e, + 0xf1, 0xe6, 0x6f, 0x0d, 0x28, 0x6f, 0x50, 0x62, 0x05, 0x64, 0x12, 0xb7, 0x78, 0xe0, 0x13, 0x47, + 0xeb, 0x30, 0x2f, 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0x98, 0x16, 0xcc, 0x9f, 0x57, 0xcc, + 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x85, 0x72, 0x83, 0xb8, 0x24, 0x36, 0x79, + 0x13, 0x50, 0x8b, 0x5a, 0x4d, 0xb2, 0x4b, 0xa8, 0xe3, 0xdb, 0x7b, 0xa4, 0xe9, 0x7b, 0x36, 0x13, + 0x6e, 0x94, 0xad, 0x7f, 0x8e, 0xef, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x5c, 0x28, 0x77, + 0xa8, 0xf8, 0x2d, 0xf6, 0x5c, 0x7a, 0x59, 0xf1, 0xea, 0x57, 0xd2, 0x1d, 0xe9, 0xae, 0xce, 0x5a, + 0x5f, 0x3c, 0xed, 0x57, 0xcb, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x9f, 0x76, 0x0e, + 0x2d, 0xaf, 0x41, 0x3a, 0xc4, 0xb3, 0x89, 0x17, 0x30, 0xb1, 0x91, 0xf9, 0xfa, 0x12, 0xaf, 0x45, + 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x01, 0x8b, 0x1d, 0xea, 0x77, 0xac, 0x96, 0xd8, 0x98, + 0x5d, 0xdf, 0x75, 0x9a, 0x3d, 0xb5, 0x9d, 0x4f, 0x9f, 0xf6, 0xab, 0x8b, 0xbb, 0x83, 0xc8, 0xb3, + 0x7e, 0xf5, 0x82, 0xd8, 0x3a, 0x0e, 0x89, 0x91, 0x78, 0x58, 0x8c, 0xe6, 0x06, 0xb9, 0x71, 0x6e, + 0x60, 0x6e, 0x41, 0xbe, 0xd1, 0x55, 0x31, 0xf1, 0x12, 0xe4, 0x6d, 0xf5, 0x5b, 0xed, 0x7c, 0x18, + 0x9c, 0x11, 0xcd, 0x59, 0xbf, 0x5a, 0xe6, 0xe5, 0x67, 0x2d, 0x04, 0xe0, 0x88, 0xc5, 0xfc, 0x8d, + 0x01, 0x15, 0x71, 0xf2, 0x7b, 0xc4, 0x25, 0xcd, 0xc0, 0xa7, 0x98, 0xbc, 0xdd, 0x75, 0x28, 0x69, + 0x13, 0x2f, 0x40, 0x5f, 0x84, 0xec, 0x11, 0xe9, 0xa9, 0xbc, 0x50, 0x54, 0x62, 0xb3, 0xaf, 0x91, + 0x1e, 0xe6, 0x70, 0x74, 0x03, 0xf2, 0x7e, 0x87, 0xc7, 0xa6, 0x4f, 0x55, 0x5e, 0x78, 0x2a, 0x54, + 0xbd, 0xa3, 0xe0, 0x67, 0xfd, 0xea, 0xc5, 0x84, 0xf8, 0x10, 0x81, 0x23, 0x56, 0xbe, 0xe2, 0x63, + 0xcb, 0xed, 0x12, 0x7e, 0x0a, 0xd1, 0x8a, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0x49, 0xc8, 0x0b, + 0x31, 0xec, 0xee, 0x15, 0xb4, 0x00, 0x59, 0x6c, 0x9d, 0x08, 0xab, 0x4a, 0x98, 0xff, 0xd4, 0x92, + 0xed, 0x0e, 0xc0, 0x4d, 0x12, 0x84, 0xfe, 0xb9, 0x0e, 0xf3, 0xe1, 0x8d, 0x93, 0xbc, 0x08, 0x23, + 0xa7, 0xc7, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0xdf, 0x84, 0x82, 0xb8, 0x2c, 0x79, 0xa5, 0x11, 0x57, + 0x35, 0xc6, 0x7d, 0xaa, 0x9a, 0xb0, 0x54, 0xc9, 0x8c, 0x2b, 0x55, 0x34, 0x73, 0x5d, 0x28, 0x4b, + 0xde, 0xb0, 0x8e, 0x4b, 0xa5, 0xe1, 0x69, 0xc8, 0x87, 0x66, 0x2a, 0x2d, 0x51, 0xfd, 0x1e, 0x0a, + 0xc2, 0x11, 0x85, 0xa6, 0xed, 0x10, 0x12, 0x17, 0x7f, 0x3a, 0x65, 0x5a, 0x91, 0x96, 0xb9, 0x7f, + 0x91, 0xa6, 0x69, 0xfa, 0x21, 0x54, 0xc6, 0x15, 0xfd, 0x0f, 0x51, 0x9a, 0xa4, 0x37, 0xc5, 0x7c, + 0xcf, 0x80, 0x05, 0x5d, 0x52, 0xfa, 0xe3, 0x4b, 0xaf, 0xe4, 0xfc, 0xa2, 0x54, 0xdb, 0x91, 0x5f, + 0x19, 0xb0, 0x94, 0x58, 0xda, 0x44, 0x27, 0x3e, 0x81, 0x51, 0xba, 0x73, 0x64, 0x27, 0x70, 0x8e, + 0xbf, 0x64, 0xa0, 0x7c, 0xcb, 0xda, 0x27, 0x6e, 0x18, 0xa9, 0xe8, 0x07, 0x50, 0x6c, 0x5b, 0x41, + 0xf3, 0x50, 0x40, 0xc3, 0x06, 0xa6, 0x91, 0x2e, 0x27, 0x27, 0x24, 0xd5, 0xb6, 0x63, 0x31, 0x37, + 0xbc, 0x80, 0xf6, 0xea, 0x17, 0x94, 0x49, 0x45, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0xeb, 0x14, 0xdf, + 0x37, 0xde, 0xe9, 0xf0, 0xea, 0x6a, 0xf2, 0x66, 0x37, 0x61, 0x82, 0x96, 0xd5, 0xe2, 0xae, 
0x73, + 0x7b, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xfc, 0x32, 0x2c, 0x0c, 0x1a, 0xcf, 0xf3, 0x4f, 0x94, 0x15, + 0x65, 0x22, 0x5c, 0x82, 0x9c, 0xc8, 0x53, 0xf2, 0x70, 0xb0, 0xfc, 0xb8, 0x9e, 0xb9, 0x66, 0x88, + 0xf4, 0x3a, 0xce, 0x90, 0x47, 0x94, 0x5e, 0x13, 0xe2, 0x1f, 0x30, 0xbd, 0xfe, 0xde, 0x80, 0x69, + 0xd1, 0x37, 0xbc, 0x09, 0x79, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0x1d, 0x2b, 0xe7, + 0xde, 0x26, 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xe7, 0x04, 0xa4, 0x1d, + 0x1e, 0xe4, 0x33, 0x63, 0x45, 0xab, 0x79, 0x49, 0x0d, 0x5b, 0x27, 0x37, 0xde, 0x09, 0x88, 0xc7, + 0x0f, 0x23, 0x0e, 0x8d, 0x2d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x54, 0x71, 0xe7, + 0x67, 0xc4, 0x3d, 0xb8, 0xe5, 0x78, 0x47, 0x6a, 0x5b, 0x23, 0x73, 0xf6, 0x14, 0x1c, 0x47, 0x14, + 0xa3, 0xae, 0x87, 0xcc, 0x64, 0xd7, 0x03, 0x57, 0xd8, 0xf4, 0xbd, 0xc0, 0xf1, 0xba, 0x43, 0xd1, + 0xb6, 0xa1, 0xe0, 0x38, 0xa2, 0xe0, 0xf5, 0x12, 0x25, 0x6d, 0xcb, 0xf1, 0x1c, 0xaf, 0xc5, 0x17, + 0xb1, 0xe1, 0x77, 0xbd, 0x40, 0x14, 0x0e, 0xaa, 0x5e, 0xc2, 0x43, 0x58, 0x3c, 0x82, 0xc3, 0xfc, + 0xf7, 0x34, 0x14, 0xf9, 0x9a, 0xc3, 0x7b, 0xee, 0x45, 0x28, 0xbb, 0xba, 0x17, 0xa8, 0xb5, 0x5f, + 0x54, 0xa6, 0x24, 0xe3, 0x1a, 0x27, 0x69, 0x39, 0xf3, 0x81, 0x7e, 0x43, 0xab, 0x3d, 0x88, 0x98, + 0x93, 0xd5, 0x41, 0x92, 0x96, 0x67, 0xaf, 0x13, 0x1e, 0x1f, 0xaa, 0x80, 0x8a, 0x8e, 0xe8, 0x9b, + 0x1c, 0x88, 0x25, 0x0e, 0x6d, 0xc3, 0x05, 0xcb, 0x75, 0xfd, 0x13, 0x01, 0xac, 0xfb, 0xfe, 0x51, + 0xdb, 0xa2, 0x47, 0x4c, 0xf4, 0xfc, 0xf9, 0xfa, 0x17, 0x14, 0xcb, 0x85, 0xf5, 0x61, 0x12, 0x3c, + 0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0x87, 0xb0, 0x34, 0x00, 0x12, 0x51, 0xae, 0x1a, + 0xf0, 0xe7, 0x94, 0x9c, 0x25, 0x3c, 0x82, 0xe6, 0x6c, 0x0c, 0x1c, 0x8f, 0x94, 0x88, 0xae, 0xc3, + 0x1c, 0xf7, 0x64, 0xbf, 0x1b, 0x84, 0xe5, 0x71, 0x4e, 0x1c, 0x37, 0x3a, 0xed, 0x57, 0xe7, 0x6e, + 0x27, 0x30, 0x78, 0x80, 0x92, 0x6f, 0xae, 0xeb, 0xb4, 0x9d, 0xa0, 0x32, 0x2b, 0x58, 0xa2, 0xcd, + 0xbd, 0xc5, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0x3f, 0xd7, 0x03, 0x37, 0x60, 0x91, 0x11, 0xcf, + 0xde, 0xf2, 0x9c, 0xc0, 0xb1, 0xdc, 0x1b, 0xc7, 0xa2, 0xf8, 0x2d, 0x8a, 0x83, 0xb8, 0xc8, 0x2b, + 0xd7, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x9c, 0x05, 0x24, 0xfb, 0x0a, 0x5b, 0x16, 0x65, + 0x32, 0x2f, 0xf2, 0xee, 0x47, 0xf5, 0x25, 0xc6, 0x40, 0xf7, 0xa3, 0x5a, 0x92, 0x10, 0x8f, 0xb6, + 0xa1, 0x20, 0xf3, 0x53, 0x1c, 0x73, 0x6b, 0x8a, 0xb8, 0xb0, 0x13, 0x22, 0xce, 0xfa, 0xd5, 0xe5, + 0x84, 0x9a, 0x08, 0x23, 0x3a, 0xd3, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1c, 0x7d, 0x36, 0x59, + 0x88, 0x27, 0x54, 0xf1, 0x94, 0x01, 0x6b, 0x54, 0xe8, 0x15, 0x98, 0x0e, 0x1e, 0xac, 0x7b, 0xcc, + 0x8b, 0xe6, 0x98, 0xf7, 0x8a, 0x42, 0x02, 0xd7, 0x2e, 0x82, 0x82, 0x71, 0xb3, 0x54, 0xe3, 0x17, + 0x69, 0xdf, 0x8c, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xfc, 0x81, 0xaa, 0x67, 0xc5, 0xe9, 0xa6, + 0xce, 0xb3, 0x61, 0x15, 0x2c, 0xc7, 0x23, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x91, 0x75, + 0xf7, 0xa3, 0x12, 0x40, 0xba, 0x44, 0x74, 0xdf, 0xee, 0xc5, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x43, + 0x61, 0xdb, 0x69, 0x52, 0x5f, 0xf4, 0xbb, 0x4f, 0xc1, 0x2c, 0x4b, 0x34, 0x73, 0xd1, 0x49, 0x86, + 0xae, 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x96, 0x2d, 0x17, 0xfb, 0xe8, 0xeb, 0x1c, + 0x88, 0x25, 0xee, 0xfa, 0x12, 0xaf, 0x32, 0x7e, 0xfa, 0x61, 0x75, 0xea, 0xfd, 0x0f, 0xab, 0x53, + 0x1f, 0x7c, 0xa8, 0x2a, 0x8e, 0x3f, 0x00, 0xc0, 0xce, 0xfe, 0xf7, 0x48, 0x53, 0xe6, 0xee, 0x54, + 0x23, 0xcc, 0x70, 0x72, 0x2e, 0x46, 0x98, 0x99, 0x81, 0xca, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, + 0x83, 0x42, 0x34, 
0x9c, 0x54, 0xfe, 0xb1, 0x18, 0xfa, 0x5b, 0x34, 0xc1, 0xc4, 0x31, 0x4d, 0xe2, + 0x22, 0x99, 0x3e, 0xf7, 0x22, 0xa9, 0x43, 0xb6, 0xeb, 0xd8, 0x6a, 0x38, 0xf0, 0x6c, 0x78, 0x91, + 0xdf, 0xd9, 0x6a, 0x9c, 0xf5, 0xab, 0x8f, 0x8d, 0x7b, 0x13, 0x08, 0x7a, 0x1d, 0xc2, 0x6a, 0x77, + 0xb6, 0x1a, 0x98, 0x33, 0x8f, 0xca, 0x6a, 0x33, 0x13, 0x66, 0xb5, 0xab, 0x00, 0xad, 0x78, 0xc4, + 0x22, 0x93, 0x46, 0xe4, 0x88, 0xda, 0x68, 0x45, 0xa3, 0x42, 0x0c, 0x16, 0x9b, 0x94, 0x58, 0xe1, + 0xa8, 0x83, 0x05, 0x56, 0x5b, 0x0e, 0x6d, 0x27, 0x8b, 0x89, 0x4b, 0x4a, 0xcd, 0xe2, 0xc6, 0xa0, + 0x30, 0x3c, 0x2c, 0x1f, 0xf9, 0xb0, 0x68, 0xab, 0x6e, 0x38, 0x56, 0x5a, 0x98, 0x58, 0xa9, 0xc8, + 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x2e, 0x2c, 0x87, 0xc0, 0xe1, 0x91, 0x84, 0xc8, + 0xfa, 0xd9, 0xfa, 0xca, 0x69, 0xbf, 0xba, 0xdc, 0x18, 0x4b, 0x85, 0xef, 0x23, 0x01, 0xd9, 0x30, + 0xe3, 0xca, 0x2a, 0xb9, 0x28, 0x2a, 0x9b, 0xaf, 0xa5, 0x5b, 0x45, 0xec, 0xfd, 0x35, 0xbd, 0x3a, + 0x8e, 0xc6, 0x4b, 0xaa, 0x30, 0x56, 0xb2, 0xd1, 0x3b, 0x50, 0xb4, 0x3c, 0xcf, 0x0f, 0x2c, 0x39, + 0x24, 0x29, 0x09, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, 0x65, 0x0c, 0x54, 0xe3, 0x1a, 0x06, 0xeb, + 0xaa, 0xd0, 0x09, 0xcc, 0xfb, 0x27, 0x1e, 0xa1, 0x98, 0x1c, 0x10, 0x4a, 0xbc, 0x26, 0x61, 0x95, + 0xb2, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x07, 0xb5, 0xa0, + 0x1a, 0xcf, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe6, 0xe2, 0xb9, 0xfa, 0x66, 0x04, + 0xc5, 0x1a, 0x05, 0xea, 0x42, 0xb9, 0xad, 0x5f, 0x19, 0x95, 0x45, 0x61, 0xe6, 0xb5, 0x74, 0x66, + 0x0e, 0x5f, 0x6a, 0x71, 0x19, 0x94, 0xc0, 0xe1, 0xa4, 0x96, 0xe5, 0x17, 0xa0, 0xf8, 0x80, 0x1d, + 0x02, 0xef, 0x30, 0x06, 0x0f, 0x64, 0xa2, 0x0e, 0xe3, 0x8f, 0x19, 0x98, 0x4b, 0x6e, 0xe3, 0xc0, + 0x75, 0x98, 0x4b, 0x75, 0x1d, 0x86, 0xbd, 0xac, 0x31, 0xf6, 0x81, 0x25, 0xcc, 0xcf, 0xd9, 0xb1, + 0xf9, 0x59, 0xa5, 0xc1, 0xe9, 0x87, 0x49, 0x83, 0x35, 0x00, 0x5e, 0xac, 0x50, 0xdf, 0x75, 0x09, + 0x15, 0x19, 0x30, 0xaf, 0x1e, 0x52, 0x22, 0x28, 0xd6, 0x28, 0x78, 0x49, 0xbd, 0xef, 0xfa, 0xcd, + 0x23, 0xb1, 0x05, 0x61, 0xf4, 0x8a, 0xdc, 0x97, 0x97, 0x25, 0x75, 0x7d, 0x08, 0x8b, 0x47, 0x70, + 0x98, 0x3d, 0xb8, 0xb8, 0x6b, 0x51, 0x5e, 0xe4, 0xc4, 0x91, 0x22, 0x7a, 0x96, 0xb7, 0x86, 0x3a, + 0xa2, 0x67, 0x27, 0x8d, 0xb8, 0x78, 0xf3, 0x63, 0x58, 0xdc, 0x15, 0x99, 0x7f, 0x35, 0xe0, 0xd2, + 0x48, 0xdd, 0x9f, 0x41, 0x47, 0xf6, 0x56, 0xb2, 0x23, 0x7b, 0x31, 0xe5, 0xc4, 0x75, 0x94, 0xb5, + 0x63, 0xfa, 0xb3, 0x59, 0xc8, 0xed, 0xf2, 0x4a, 0xd8, 0xfc, 0xd8, 0x80, 0x92, 0xf8, 0x35, 0xc9, + 0xc0, 0xbb, 0x9a, 0x7c, 0x07, 0x29, 0x3c, 0xba, 0x37, 0x90, 0x47, 0x31, 0x11, 0x7f, 0xcf, 0x80, + 0xe4, 0xa8, 0x19, 0xbd, 0x2c, 0x43, 0xc0, 0x88, 0x66, 0xc1, 0x13, 0xba, 0xff, 0x4b, 0xe3, 0x5a, + 0xd2, 0x0b, 0xa9, 0xa6, 0x95, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6b, 0x05, 0x87, 0x8c, 0xef, + 0x5d, 0x87, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, 0x37, 0xe0, 0xd2, + 0xd8, 0xe7, 0x2d, 0x9e, 0x45, 0x9a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, + 0x78, 0x2f, 0x99, 0x78, 0x13, 0x1b, 0xec, 0x25, 0x13, 0xda, 0x70, 0x92, 0xd6, 0xfc, 0x67, 0x06, + 0xd4, 0x7b, 0xd2, 0xff, 0xd8, 0xe9, 0x9f, 0x1c, 0x78, 0xcd, 0x9a, 0x4b, 0xbe, 0x66, 0x45, 0x4f, + 0x57, 0xda, 0x73, 0x4e, 0xf6, 0xfe, 0xcf, 0x39, 0xe8, 0xf9, 0xe8, 0x85, 0x48, 0xfa, 0xd0, 0x4a, + 0xf2, 0x85, 0xe8, 0xac, 0x5f, 0x2d, 0x29, 0xe1, 0xc9, 0x17, 0xa3, 0x37, 0x60, 0xd6, 0x26, 0x81, + 0xe5, 0xb8, 0xb2, 0x2f, 0x4c, 0xfd, 0xe6, 0x21, 0x85, 0x35, 0x24, 0x6b, 0xbd, 0xc8, 0x6d, 0x52, + 0x1f, 0x38, 0x14, 0xc8, 0x13, 0x76, 0xd3, 
0xb7, 0x65, 0x47, 0x92, 0x8b, 0x13, 0xf6, 0x86, 0x6f, + 0x13, 0x2c, 0x30, 0xe6, 0xfb, 0x06, 0x14, 0xa5, 0xa4, 0x0d, 0xab, 0xcb, 0x08, 0xba, 0x12, 0xad, + 0x42, 0x1e, 0xf7, 0x25, 0xfd, 0x29, 0xf0, 0xac, 0x5f, 0x2d, 0x08, 0x32, 0xd1, 0xcc, 0x8c, 0x78, + 0xf2, 0xca, 0x9c, 0xb3, 0x47, 0x8f, 0x43, 0x4e, 0x04, 0x90, 0xda, 0xcc, 0xf8, 0x4d, 0x93, 0x03, + 0xb1, 0xc4, 0x99, 0x9f, 0x66, 0xa0, 0x9c, 0x58, 0x5c, 0x8a, 0xbe, 0x20, 0x1a, 0xa1, 0x66, 0x52, + 0x8c, 0xe5, 0xc7, 0xff, 0x83, 0x40, 0x5d, 0x5f, 0x33, 0x0f, 0x73, 0x7d, 0x7d, 0x1b, 0x66, 0x9a, + 0x7c, 0x8f, 0xc2, 0x3f, 0xa4, 0x5c, 0x99, 0xe4, 0x38, 0xc5, 0xee, 0xc6, 0xde, 0x28, 0x3e, 0x19, + 0x56, 0x02, 0xd1, 0x4d, 0x58, 0xa4, 0x24, 0xa0, 0xbd, 0xf5, 0x83, 0x80, 0x50, 0x7d, 0x98, 0x90, + 0x8b, 0xab, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, 0x7d, 0x28, 0xdd, 0xb6, 0xf6, 0xdd, 0xe8, + 0x15, 0x0f, 0x43, 0xd9, 0xf1, 0x9a, 0x6e, 0xd7, 0x26, 0x32, 0xa1, 0x87, 0xd9, 0x2b, 0x0c, 0xda, + 0x2d, 0x1d, 0x79, 0xd6, 0xaf, 0x5e, 0x48, 0x00, 0xe4, 0xb3, 0x15, 0x4e, 0x8a, 0x30, 0x5d, 0x98, + 0xfe, 0x0c, 0x3b, 0xc9, 0xef, 0x40, 0x21, 0xae, 0xf5, 0x1f, 0xb1, 0x4a, 0xf3, 0x2d, 0xc8, 0x73, + 0x8f, 0x0f, 0x7b, 0xd4, 0x73, 0xaa, 0xa4, 0x64, 0xed, 0x95, 0x49, 0x53, 0x7b, 0x89, 0xb7, 0xe0, + 0x3b, 0x1d, 0xfb, 0x21, 0xdf, 0x82, 0x33, 0x0f, 0x73, 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x82, + 0xfc, 0xbf, 0x0c, 0xbf, 0x64, 0x64, 0x01, 0xa1, 0x5d, 0x32, 0xfa, 0xfd, 0xaf, 0xbd, 0x30, 0xfc, + 0xd8, 0x00, 0x10, 0xa3, 0x3c, 0x31, 0x46, 0x4a, 0xf1, 0xaf, 0x83, 0x3b, 0x30, 0xe3, 0x4b, 0x8f, + 0x94, 0xef, 0xc1, 0x13, 0xce, 0x8b, 0xa3, 0x40, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xfa, 0xab, 0x1f, + 0xdd, 0x5b, 0x99, 0xfa, 0xf8, 0xde, 0xca, 0xd4, 0x27, 0xf7, 0x56, 0xa6, 0xde, 0x3d, 0x5d, 0x31, + 0x3e, 0x3a, 0x5d, 0x31, 0x3e, 0x3e, 0x5d, 0x31, 0x3e, 0x39, 0x5d, 0x31, 0x3e, 0x3d, 0x5d, 0x31, + 0xde, 0xff, 0xfb, 0xca, 0xd4, 0x1b, 0x4f, 0xa4, 0xf9, 0x1f, 0xe2, 0x7f, 0x03, 0x00, 0x00, 0xff, + 0xff, 0xd3, 0xee, 0xe4, 0x1c, 0xae, 0x28, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -2025,6 +2055,48 @@ func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FieldSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *FieldsV1) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3714,6 +3786,25 @@ func (m *Duration) Size() (n int) { return n } +func (m *FieldSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l 
+ sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *FieldsV1) Size() (n int) { if m == nil { return 0 @@ -4429,6 +4520,18 @@ func (this *Duration) String() string { }, "") return s } +func (this *FieldSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} func (this *GetOptions) String() string { if this == nil { return "nil" @@ -6443,6 +6546,152 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } return nil } +func (m *FieldSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = FieldSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = 
append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FieldsV1) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 2b95700f72..18dd0b067c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -324,6 +324,25 @@ message Duration { optional int64 duration = 1; } +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message FieldSelectorRequirement { + // key is the field selector key that the requirement applies to. + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + optional string operator = 2; + + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. + // +optional + // +listType=atomic + repeated string values = 3; +} + // FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. // // Each key is either a '.' representing the field itself, and will always map to an empty set, @@ -460,7 +479,7 @@ message List { optional ListMeta metadata = 1; // List of objects - repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } // ListMeta describes metadata that synthetic resources must have, including lists and @@ -1209,6 +1228,6 @@ message WatchEvent { // * If Type is Deleted: the state of the object immediately before deletion. // * If Type is Error: *Status is recommended; other types may make sense // depending on context. 
- optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index 592dcb8a74..c748071ed7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -24,8 +24,10 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" + utiljson "k8s.io/apimachinery/pkg/util/json" ) // LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements @@ -280,13 +282,20 @@ func (f FieldsV1) MarshalJSON() ([]byte, error) { if f.Raw == nil { return []byte("null"), nil } + if f.getContentType() == fieldsV1InvalidOrValidCBORObject { + var u map[string]interface{} + if err := cbor.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 cbor invalid: %w", err) + } + return utiljson.Marshal(u) + } return f.Raw, nil } // UnmarshalJSON implements json.Unmarshaler func (f *FieldsV1) UnmarshalJSON(b []byte) error { if f == nil { - return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + return errors.New("metav1.FieldsV1: UnmarshalJSON on nil pointer") } if !bytes.Equal(b, []byte("null")) { f.Raw = append(f.Raw[0:0], b...) @@ -296,3 +305,75 @@ func (f *FieldsV1) UnmarshalJSON(b []byte) error { var _ json.Marshaler = FieldsV1{} var _ json.Unmarshaler = &FieldsV1{} + +func (f FieldsV1) MarshalCBOR() ([]byte, error) { + if f.Raw == nil { + return cbor.Marshal(nil) + } + if f.getContentType() == fieldsV1InvalidOrValidJSONObject { + var u map[string]interface{} + if err := utiljson.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 json invalid: %w", err) + } + return cbor.Marshal(u) + } + return f.Raw, nil +} + +var cborNull = []byte{0xf6} + +func (f *FieldsV1) UnmarshalCBOR(b []byte) error { + if f == nil { + return errors.New("metav1.FieldsV1: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(b, cborNull) { + f.Raw = append(f.Raw[0:0], b...) + } + return nil +} + +const ( + // fieldsV1InvalidOrEmpty indicates that a FieldsV1 either contains no raw bytes or its raw + // bytes don't represent an allowable value in any supported encoding. + fieldsV1InvalidOrEmpty = iota + + // fieldsV1InvalidOrValidJSONObject indicates that a FieldV1 either contains raw bytes that + // are a valid JSON encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidJSONObject + + // fieldsV1InvalidOrValidCBORObject indicates that a FieldV1 either contains raw bytes that + // are a valid CBOR encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidCBORObject +) + +// getContentType returns one of fieldsV1InvalidOrEmpty, fieldsV1InvalidOrValidJSONObject, +// fieldsV1InvalidOrValidCBORObject based on the value of Raw. +// +// Raw can be encoded in JSON or CBOR and is only valid if it is empty, null, or an object (map) +// value. It is invalid if it contains a JSON string, number, boolean, or array. If Raw is nonempty +// and represents an allowable value, then the initial byte unambiguously distinguishes a +// JSON-encoded value from a CBOR-encoded value. 
+// +// A valid JSON-encoded value can begin with any of the four JSON whitespace characters, the first +// character 'n' of null, or '{' (0x09, 0x0a, 0x0d, 0x20, 0x6e, or 0x7b, respectively). A valid +// CBOR-encoded value can begin with the null simple value, an initial byte with major type "map", +// or, if a tag-enclosed map, an initial byte with major type "tag" (0xf6, 0xa0...0xbf, or +// 0xc6...0xdb). The two sets of valid initial bytes don't intersect. +func (f FieldsV1) getContentType() int { + if len(f.Raw) > 0 { + p := f.Raw[0] + switch p { + case 'n', '{', '\t', '\r', '\n', ' ': + return fieldsV1InvalidOrValidJSONObject + case 0xf6: // null + return fieldsV1InvalidOrValidCBORObject + default: + if p >= 0xa0 && p <= 0xbf /* map */ || p >= 0xc6 && p <= 0xdb /* tag */ { + return fieldsV1InvalidOrValidCBORObject + } + } + } + return fieldsV1InvalidOrEmpty +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index 8eb37f4367..9f302b3f36 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" @@ -129,6 +131,25 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error { return nil } +func (t *MicroTime) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(RFC3339Micro, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *MicroTime) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -160,6 +181,13 @@ func (t MicroTime) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(RFC3339Micro)) } +func (t MicroTime) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + return cbor.Marshal(t.UTC().Format(RFC3339Micro)) +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. 
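// A minimal illustrative sketch (not part of the vendored file above): the new
// MarshalCBOR/UnmarshalCBOR methods encode a MicroTime as an RFC3339Micro text
// string and the zero value as CBOR null, mirroring the existing JSON behavior;
// the Time type in the next hunk gets the same treatment. The timestamp below
// is an arbitrary example.
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	in := metav1.NewMicroTime(time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC))

	b, err := in.MarshalCBOR() // CBOR text string "2024-01-02T03:04:05.000000Z"
	if err != nil {
		panic(err)
	}

	var out metav1.MicroTime
	if err := out.UnmarshalCBOR(b); err != nil { // parses RFC3339Micro, stores the local-time equivalent
		panic(err)
	}
	fmt.Println(in.Equal(&out)) // true: the instant survives the round trip

	var zero metav1.MicroTime
	null, _ := zero.MarshalCBOR()
	fmt.Printf("%x\n", null) // f6: the zero value encodes as CBOR null
}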
// diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 421770d432..0333cfdb33 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) // Time is a wrapper around time.Time which supports correct @@ -116,6 +118,25 @@ func (t *Time) UnmarshalJSON(b []byte) error { return nil } +func (t *Time) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(time.RFC3339, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *Time) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -151,6 +172,14 @@ func (t Time) MarshalJSON() ([]byte, error) { return buf, nil } +func (t Time) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + + return cbor.Marshal(t.UTC().Format(time.RFC3339)) +} + // ToUnstructured implements the value.UnstructuredConverter interface. func (t Time) ToUnstructured() interface{} { if t.IsZero() { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 9695ba50b4..473adb9ef5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -1278,6 +1278,33 @@ const ( LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" ) +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type FieldSelectorRequirement struct { + // key is the field selector key that the requirement applies to. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + Operator FieldSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=FieldSelectorOperator"` + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. + // +optional + // +listType=atomic + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A field selector operator is the set of operators that can be used in a selector requirement. +type FieldSelectorOperator string + +const ( + FieldSelectorOpIn FieldSelectorOperator = "In" + FieldSelectorOpNotIn FieldSelectorOperator = "NotIn" + FieldSelectorOpExists FieldSelectorOperator = "Exists" + FieldSelectorOpDoesNotExist FieldSelectorOperator = "DoesNotExist" +) + // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource // that the fieldset applies to. 
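// A minimal illustrative sketch (not part of the vendored file): constructing
// requirements with the FieldSelectorRequirement type and FieldSelectorOperator
// constants added above. The field keys and values are arbitrary examples, not
// names mandated by the API.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Set-based requirement: status.phase in (Running, Pending).
	in := metav1.FieldSelectorRequirement{
		Key:      "status.phase",
		Operator: metav1.FieldSelectorOpIn,
		Values:   []string{"Running", "Pending"},
	}

	// Existence requirement: Values must stay empty for Exists/DoesNotExist.
	exists := metav1.FieldSelectorRequirement{
		Key:      "spec.nodeName",
		Operator: metav1.FieldSelectorOpExists,
	}

	fmt.Println(in.Key, in.Operator, in.Values)
	fmt.Println(exists.Key, exists.Operator)
}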
type ManagedFieldsEntry struct { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index b736e83712..1fa37215cd 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -135,6 +135,17 @@ func (DeleteOptions) SwaggerDoc() map[string]string { return map_DeleteOptions } +var map_FieldSelectorRequirement = map[string]string{ + "": "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the field selector key that the requirement applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.", +} + +func (FieldSelectorRequirement) SwaggerDoc() map[string]string { + return map_FieldSelectorRequirement +} + var map_FieldsV1 = map[string]string{ "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index a0f709ad86..3eba5ba541 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -32,6 +32,10 @@ import ( type LabelSelectorValidationOptions struct { // Allow invalid label value in selector AllowInvalidLabelValueInSelector bool + + // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool } // LabelSelectorHasInvalidLabelValue returns true if the given selector contains an invalid label value in a match expression. @@ -79,7 +83,9 @@ func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts L allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) } default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + } } allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) 
if !opts.AllowInvalidLabelValueInSelector { @@ -113,6 +119,39 @@ func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorLi return allErrs } +// FieldSelectorValidationOptions is a struct that can be passed to ValidateFieldSelectorRequirement to record the validate options +type FieldSelectorValidationOptions struct { + // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool +} + +// ValidateLabelSelectorRequirement validates the requirement according to the opts and returns any validation errors. +func ValidateFieldSelectorRequirement(requirement metav1.FieldSelectorRequirement, opts FieldSelectorValidationOptions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(requirement.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "must be specified")) + } + + switch requirement.Operator { + case metav1.FieldSelectorOpIn, metav1.FieldSelectorOpNotIn: + if len(requirement.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case metav1.FieldSelectorOpExists, metav1.FieldSelectorOpDoesNotExist: + if len(requirement.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + default: + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), requirement.Operator, "not a valid selector operator")) + } + } + + return allErrs +} + func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { allErrs := field.ErrorList{} //lint:file-ignore SA1019 Keep validation for deprecated OrphanDependents option until it's being removed diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 7d29c504ab..90cc54a7e7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -327,6 +327,27 @@ func (in *Duration) DeepCopy() *Duration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldSelectorRequirement) DeepCopyInto(out *FieldSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorRequirement. +func (in *FieldSelectorRequirement) DeepCopy() *FieldSelectorRequirement { + if in == nil { + return nil + } + out := new(FieldSelectorRequirement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
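// A minimal illustrative sketch (not part of the vendored file): exercising the
// ValidateFieldSelectorRequirement helper and FieldSelectorValidationOptions
// added in the validation.go hunk above. The requirement contents are arbitrary;
// "Like" is deliberately not one of the supported operators.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	req := metav1.FieldSelectorRequirement{
		Key:      "status.phase",
		Operator: metav1.FieldSelectorOperator("Like"), // unknown operator
		Values:   []string{"Running"},
	}

	// Strict validation rejects the unknown operator.
	errs := metav1validation.ValidateFieldSelectorRequirement(
		req, metav1validation.FieldSelectorValidationOptions{}, field.NewPath("fieldSelector"))
	fmt.Println(len(errs)) // 1: "not a valid selector operator"

	// Relaxed validation, for broader checks such as *SubjectAccessReview.
	errs = metav1validation.ValidateFieldSelectorRequirement(
		req,
		metav1validation.FieldSelectorValidationOptions{AllowUnknownOperatorInRequirement: true},
		field.NewPath("fieldSelector"))
	fmt.Println(len(errs)) // 0
}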
func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index d14d42591b..fcec553542 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -33,9 +33,9 @@ message PartialObjectMetadataList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; // items contains each of the included items. - repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 5e60142405..9e22a00564 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -45,6 +45,19 @@ var ( // Requirements is AND of all requirements. type Requirements []Requirement +func (r Requirements) String() string { + var sb strings.Builder + + for i, requirement := range r { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(requirement.String()) + } + + return sb.String() +} + // Selector represents a label selector. type Selector interface { // Matches returns true if this selector matches the given set of labels. @@ -285,6 +298,13 @@ func (r *Requirement) Values() sets.String { return ret } +// ValuesUnsorted returns a copy of requirement values as passed to NewRequirement without sorting. +func (r *Requirement) ValuesUnsorted() []string { + ret := make([]string, 0, len(r.strValues)) + ret = append(ret, r.strValues...) + return ret +} + // Equal checks the equality of requirement. func (r Requirement) Equal(x Requirement) bool { if r.key != x.key { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go index 9056397fa5..60c000bcb7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -18,16 +18,77 @@ package runtime import ( "bytes" - "encoding/json" "errors" + "fmt" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + "k8s.io/apimachinery/pkg/util/json" ) +// RawExtension intentionally avoids implementing value.UnstructuredConverter for now because the +// signature of ToUnstructured does not allow returning an error value in cases where the conversion +// is not possible (content type is unrecognized or bytes don't match content type). 
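// A minimal illustrative sketch (not part of the vendored diff): exercising the
// Requirements.String and Requirement.ValuesUnsorted helpers added in the
// labels/selector.go hunk above. The label keys and values are arbitrary
// examples.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	tier, err := labels.NewRequirement("tier", selection.In, []string{"web", "cache"})
	if err != nil {
		panic(err)
	}
	env, err := labels.NewRequirement("env", selection.Exists, nil)
	if err != nil {
		panic(err)
	}

	reqs := labels.Requirements{*tier, *env}
	fmt.Println(reqs.String())         // "tier in (cache,web), env"
	fmt.Println(tier.ValuesUnsorted()) // [web cache]: order as passed to NewRequirement, no sorting
}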
+func rawToUnstructured(raw []byte, contentType string) (interface{}, error) { + switch contentType { + case ContentTypeJSON: + var u interface{} + if err := json.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as JSON: %w", err) + } + return u, nil + case ContentTypeCBOR: + var u interface{} + if err := cbor.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as CBOR: %w", err) + } + return u, nil + default: + return nil, fmt.Errorf("cannot convert RawExtension with unrecognized content type to unstructured") + } +} + +func (re RawExtension) guessContentType() string { + switch { + case bytes.HasPrefix(re.Raw, cborSelfDescribed): + return ContentTypeCBOR + case len(re.Raw) > 0: + switch re.Raw[0] { + case '\t', '\r', '\n', ' ', '{', '[', 'n', 't', 'f', '"', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + // Prefixes for the four whitespace characters, objects, arrays, strings, numbers, true, false, and null. + return ContentTypeJSON + } + } + return "" +} + func (re *RawExtension) UnmarshalJSON(in []byte) error { if re == nil { return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") } - if !bytes.Equal(in, []byte("null")) { - re.Raw = append(re.Raw[0:0], in...) + if bytes.Equal(in, []byte("null")) { + return nil + } + re.Raw = append(re.Raw[0:0], in...) + return nil +} + +var ( + cborNull = []byte{0xf6} + cborSelfDescribed = []byte{0xd9, 0xd9, 0xf7} +) + +func (re *RawExtension) UnmarshalCBOR(in []byte) error { + if re == nil { + return errors.New("runtime.RawExtension: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(in, cborNull) { + if !bytes.HasPrefix(in, cborSelfDescribed) { + // The self-described CBOR tag doesn't change the interpretation of the data + // item it encloses, but it is useful as a magic number. Its encoding is + // also what is used to implement the CBOR RecognizingDecoder. + re.Raw = append(re.Raw[:0], cborSelfDescribed...) + } + re.Raw = append(re.Raw, in...) } return nil } @@ -46,6 +107,35 @@ func (re RawExtension) MarshalJSON() ([]byte, error) { } return []byte("null"), nil } - // TODO: Check whether ContentType is actually JSON before returning it. - return re.Raw, nil + + contentType := re.guessContentType() + if contentType == ContentTypeJSON { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return json.Marshal(u) +} + +func (re RawExtension) MarshalCBOR() ([]byte, error) { + if re.Raw == nil { + if re.Object != nil { + return cbor.Marshal(re.Object) + } + return cbor.Marshal(nil) + } + + contentType := re.guessContentType() + if contentType == ContentTypeCBOR { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return cbor.Marshal(u) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go new file mode 100644 index 0000000000..cd78b1df26 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package direct provides functions for marshaling and unmarshaling between arbitrary Go values and +// CBOR data, with behavior that is compatible with that of the CBOR serializer. In particular, +// types that implement cbor.Marshaler and cbor.Unmarshaler should use these functions. +package direct + +import ( + "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes" +) + +func Marshal(src interface{}) ([]byte, error) { + return modes.Encode.Marshal(src) +} + +func Unmarshal(src []byte, dst interface{}) error { + return modes.Decode.Unmarshal(src, dst) +} + +func Diagnose(src []byte) (string, error) { + return modes.Diagnostic.Diagnose(src) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go new file mode 100644 index 0000000000..f14cbd6b58 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go @@ -0,0 +1,65 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "bytes" + "sync" +) + +var buffers = BufferProvider{p: new(sync.Pool)} + +type buffer struct { + bytes.Buffer +} + +type pool interface { + Get() interface{} + Put(interface{}) +} + +type BufferProvider struct { + p pool +} + +func (b *BufferProvider) Get() *buffer { + if buf, ok := b.p.Get().(*buffer); ok { + return buf + } + return &buffer{} +} + +func (b *BufferProvider) Put(buf *buffer) { + if buf.Cap() > 3*1024*1024 /* Default MaxRequestBodyBytes */ { + // Objects in a sync.Pool are assumed to be fungible. This is not a good assumption + // for pools of *bytes.Buffer because a *bytes.Buffer's underlying array grows as + // needed to accommodate writes. In Kubernetes, apiservers tend to encode "small" + // objects very frequently and much larger objects (especially large lists) only + // occasionally. Under steady load, pooled buffers tend to be borrowed frequently + // enough to prevent them from being released. Over time, each buffer is used to + // encode a large object and its capacity increases accordingly. The result is that + // practically all buffers in the pool retain much more capacity than needed to + // encode most objects. + + // As a basic mitigation for the worst case, buffers with more capacity than the + // default max request body size are never returned to the pool. + // TODO: Optimize for higher buffer utilization. 
+ return + } + buf.Reset() + b.p.Put(buf) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go new file mode 100644 index 0000000000..858529e958 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go @@ -0,0 +1,422 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "encoding" + "encoding/json" + "errors" + "fmt" + "reflect" + "sync" + + "github.com/fxamacker/cbor/v2" +) + +// Returns a non-nil error if and only if the argument's type (or one of its component types, for +// composite types) implements json.Marshaler or encoding.TextMarshaler without also implementing +// cbor.Marshaler and likewise for the respective Unmarshaler interfaces. +// +// This is a temporary, graduation-blocking restriction and will be removed in favor of automatic +// transcoding between CBOR and JSON/text for these types. This restriction allows CBOR to be +// exercised for in-tree and unstructured types while mitigating the risk of mangling out-of-tree +// types in client programs. +func RejectCustomMarshalers(v interface{}) error { + if v == nil { + return nil + } + rv := reflect.ValueOf(v) + if err := marshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil { + return fmt.Errorf("unable to serialize %T: %w", v, err) + } + if err := unmarshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil { + return fmt.Errorf("unable to serialize %T: %w", v, err) + } + return nil +} + +// Recursion depth is limited as a basic mitigation against cyclic objects. Objects created by the +// decoder shouldn't be able to contain cycles, but practically any object can be passed to the +// encoder. +var errMaxDepthExceeded = errors.New("object depth exceeds limit (possible cycle?)") + +// The JSON encoder begins detecting cycles after depth 1000. Use a generous limit here, knowing +// that it can might deeply nested acyclic objects. The limit will be removed along with the rest of +// this mechanism. +const maxDepth = 2048 + +var marshalerCache = checkers{ + cborInterface: reflect.TypeFor[cbor.Marshaler](), + nonCBORInterfaces: []reflect.Type{ + reflect.TypeFor[json.Marshaler](), + reflect.TypeFor[encoding.TextMarshaler](), + }, +} + +var unmarshalerCache = checkers{ + cborInterface: reflect.TypeFor[cbor.Unmarshaler](), + nonCBORInterfaces: []reflect.Type{ + reflect.TypeFor[json.Unmarshaler](), + reflect.TypeFor[encoding.TextUnmarshaler](), + }, + assumeAddressableValues: true, +} + +// checker wraps a function for dynamically checking a value of a specific type for custom JSON +// behaviors not matched by a custom CBOR behavior. +type checker struct { + // check returns a non-nil error if the given value might be marshalled to or from CBOR + // using the default behavior for its kind, but marshalled to or from JSON using custom + // behavior. 
+ check func(rv reflect.Value, depth int) error + + // safe returns true if all values of this type are safe from mismatched custom marshalers. + safe func() bool +} + +// TODO: stale +// Having a single addressable checker for comparisons lets us prune and collapse parts of the +// object traversal that are statically known to be safe. Depending on the type, it may be +// unnecessary to inspect each value of that type. For example, no value of the built-in type bool +// can implement json.Marshaler (a named type whose underlying type is bool could, but it is a +// distinct type from bool). +var noop = checker{ + safe: func() bool { + return true + }, + check: func(rv reflect.Value, depth int) error { + return nil + }, +} + +type checkers struct { + m sync.Map // reflect.Type => *checker + + cborInterface reflect.Type + nonCBORInterfaces []reflect.Type + + assumeAddressableValues bool +} + +func (cache *checkers) getChecker(rt reflect.Type) checker { + if ptr, ok := cache.m.Load(rt); ok { + return *ptr.(*checker) + } + + return cache.getCheckerInternal(rt, nil) +} + +// linked list node representing the path from a composite type to an element type +type path struct { + Type reflect.Type + Parent *path +} + +func (p path) cyclic(rt reflect.Type) bool { + for ancestor := &p; ancestor != nil; ancestor = ancestor.Parent { + if ancestor.Type == rt { + return true + } + } + return false +} + +func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c checker) { + // Store a placeholder cache entry first to handle cyclic types. + var wg sync.WaitGroup + wg.Add(1) + defer wg.Done() + c = checker{ + safe: func() bool { + wg.Wait() + return c.safe() + }, + check: func(rv reflect.Value, depth int) error { + wg.Wait() + return c.check(rv, depth) + }, + } + if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded { + // Someone else stored an entry for this type, use it. + return *actual.(*checker) + } + + // Take a nonreflective path for the unstructured container types. They're common and + // usually nested inside one another. + switch rt { + case reflect.TypeFor[map[string]interface{}](), reflect.TypeFor[[]interface{}](): + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + return checkUnstructuredValue(cache, rv.Interface(), depth) + }, + } + } + + // It's possible that one of the relevant interfaces is implemented on a type with a pointer + // receiver, but that a particular value of that type is not addressable. For example: + // + // func (Foo) MarshalText() ([]byte, error) { ... } + // func (*Foo) MarshalCBOR() ([]byte, error) { ... } + // + // Both methods are in the method set of *Foo, but the method set of Foo contains only + // MarshalText. + // + // Both the unmarshaler and marshaler checks assume that methods implementing a JSON or text + // interface with a pointer receiver are always accessible. Only the unmarshaler check + // assumes that CBOR methods with pointer receivers are accessible. 
+ + if rt.Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if rt.Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", rt, unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + if cache.assumeAddressableValues && reflect.PointerTo(rt).Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if reflect.PointerTo(rt).Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", reflect.PointerTo(rt), unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + self := &path{Type: rt, Parent: parent} + + switch rt.Kind() { + case reflect.Array: + ce := cache.getCheckerInternal(rt.Elem(), self) + rtlen := rt.Len() + if rtlen == 0 || (!self.cyclic(rt.Elem()) && ce.safe()) { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rtlen; i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Interface: + // All interface values have to be checked because their dynamic type might + // implement one of the interesting interfaces or be composed of another type that + // does. + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + // Unpacking interfaces must count against recursion depth, + // consider this cycle: + // > var i interface{} + // > var p *interface{} = &i + // > i = p + // > rv := reflect.ValueOf(i) + // > for { + // > rv = rv.Elem() + // > } + if depth <= 0 { + return errMaxDepthExceeded + } + rv = rv.Elem() + return cache.getChecker(rv.Type()).check(rv, depth-1) + }, + } + + case reflect.Map: + rtk := rt.Key() + ck := cache.getCheckerInternal(rtk, self) + rte := rt.Elem() + ce := cache.getCheckerInternal(rte, self) + if !self.cyclic(rtk) && !self.cyclic(rte) && ck.safe() && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + iter := rv.MapRange() + rvk := reflect.New(rtk).Elem() + rve := reflect.New(rte).Elem() + for iter.Next() { + rvk.SetIterKey(iter) + if err := ck.check(rvk, depth-1); err != nil { + return err + } + rve.SetIterValue(iter) + if err := ce.check(rve, depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Pointer: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + if depth <= 0 { + return errMaxDepthExceeded + } + return ce.check(rv.Elem(), depth-1) + }, + } + + case reflect.Slice: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rv.Len(); i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return 
err + } + } + return nil + }, + } + + case reflect.Struct: + type field struct { + Index int + Checker checker + } + var fields []field + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + cf := cache.getCheckerInternal(f.Type, self) + if !self.cyclic(f.Type) && cf.safe() { + continue + } + fields = append(fields, field{Index: i, Checker: cf}) + } + if len(fields) == 0 { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for _, fi := range fields { + if err := fi.Checker.check(rv.Field(fi.Index), depth-1); err != nil { + return err + } + } + return nil + }, + } + + default: + // Not a serializable composite type (funcs and channels are composite types but are + // rejected by JSON and CBOR serialization). + return noop + + } +} + +func checkUnstructuredValue(cache *checkers, v interface{}, depth int) error { + switch v := v.(type) { + case nil, bool, int64, float64, string: + return nil + case []interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + case map[string]interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + default: + // Unmarshaling an unstructured doesn't use other dynamic types, but nothing + // prevents inserting values with arbitrary dynamic types into unstructured content, + // as long as they can be marshalled. + rv := reflect.ValueOf(v) + return cache.getChecker(rv.Type()).check(rv, depth) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go new file mode 100644 index 0000000000..895b0deff9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go @@ -0,0 +1,158 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "reflect" + + "github.com/fxamacker/cbor/v2" +) + +var simpleValues *cbor.SimpleValueRegistry = func() *cbor.SimpleValueRegistry { + var opts []func(*cbor.SimpleValueRegistry) error + for sv := 0; sv <= 255; sv++ { + // Reject simple values 0-19, 23, and 32-255. The simple values 24-31 are reserved + // and considered ill-formed by the CBOR specification. We only accept false (20), + // true (21), and null (22). + switch sv { + case 20: // false + case 21: // true + case 22: // null + case 24, 25, 26, 27, 28, 29, 30, 31: // reserved + default: + opts = append(opts, cbor.WithRejectedSimpleValue(cbor.SimpleValue(sv))) + } + } + simpleValues, err := cbor.NewSimpleValueRegistryFromDefaults(opts...) 
+ if err != nil { + panic(err) + } + return simpleValues +}() + +var Decode cbor.DecMode = func() cbor.DecMode { + decode, err := cbor.DecOptions{ + // Maps with duplicate keys are well-formed but invalid according to the CBOR spec + // and never acceptable. Unlike the JSON serializer, inputs containing duplicate map + // keys are rejected outright and not surfaced as a strict decoding error. + DupMapKey: cbor.DupMapKeyEnforcedAPF, + + // For JSON parity, decoding an RFC3339 string into time.Time needs to be accepted + // with or without tagging. If a tag number is present, it must be valid. + TimeTag: cbor.DecTagOptional, + + // Observed depth up to 16 in fuzzed batch/v1 CronJobList. JSON implementation limit + // is 10000. + MaxNestedLevels: 64, + + MaxArrayElements: 1024, + MaxMapPairs: 1024, + + // Indefinite-length sequences aren't produced by this serializer, but other + // implementations can. + IndefLength: cbor.IndefLengthAllowed, + + // Accept inputs that contain CBOR tags. + TagsMd: cbor.TagsAllowed, + + // Decode type 0 (unsigned integer) as int64. + // TODO: IntDecConvertSignedOrFail errors on overflow, JSON will try to fall back to float64. + IntDec: cbor.IntDecConvertSignedOrFail, + + // Disable producing map[cbor.ByteString]interface{}, which is not acceptable for + // decodes into interface{}. + MapKeyByteString: cbor.MapKeyByteStringForbidden, + + // Error on map keys that don't map to a field in the destination struct. + ExtraReturnErrors: cbor.ExtraDecErrorUnknownField, + + // Decode maps into concrete type map[string]interface{} when the destination is an + // interface{}. + DefaultMapType: reflect.TypeOf(map[string]interface{}(nil)), + + // A CBOR text string whose content is not a valid UTF-8 sequence is well-formed but + // invalid according to the CBOR spec. Reject invalid inputs. Encoders are + // responsible for ensuring that all text strings they produce contain valid UTF-8 + // sequences and may use the byte string major type to encode strings that have not + // been validated. + UTF8: cbor.UTF8RejectInvalid, + + // Never make a case-insensitive match between a map key and a struct field. + FieldNameMatching: cbor.FieldNameMatchingCaseSensitive, + + // Produce string concrete values when decoding a CBOR byte string into interface{}. + DefaultByteStringType: reflect.TypeOf(""), + + // Allow CBOR byte strings to be decoded into string destination values. If a byte + // string is enclosed in an "expected later encoding" tag + // (https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), then the text + // encoding indicated by that tag (e.g. base64) will be applied to the contents of + // the byte string. + ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding, + + // Allow CBOR byte strings to match struct fields when appearing as a map key. + FieldNameByteString: cbor.FieldNameByteStringAllowed, + + // When decoding an unrecognized tag to interface{}, return the decoded tag content + // instead of the default, a cbor.Tag representing a (number, content) pair. + UnrecognizedTagToAny: cbor.UnrecognizedTagContentToAny, + + // Decode time tags to interface{} as strings containing RFC 3339 timestamps. + TimeTagToAny: cbor.TimeTagToRFC3339Nano, + + // For parity with JSON, strings can be decoded into time.Time if they are RFC 3339 + // timestamps. + ByteStringToTime: cbor.ByteStringToTimeAllowed, + + // Reject NaN and infinite floating-point values since they don't have a JSON + // representation (RFC 8259 Section 6). 
+ NaN: cbor.NaNDecodeForbidden, + Inf: cbor.InfDecodeForbidden, + + // When unmarshaling a byte string into a []byte, assume that the byte string + // contains base64-encoded bytes, unless explicitly counterindicated by an "expected + // later encoding" tag. This is consistent with the because of unmarshaling a JSON + // text into a []byte. + ByteStringExpectedFormat: cbor.ByteStringExpectedBase64, + + // Reject the arbitrary-precision integer tags because they can't be faithfully + // roundtripped through the allowable Unstructured types. + BignumTag: cbor.BignumTagForbidden, + + // Reject anything other than the simple values true, false, and null. + SimpleValues: simpleValues, + + // Disable default recognition of types implementing encoding.BinaryUnmarshaler, + // which is not recognized for JSON decoding. + BinaryUnmarshaler: cbor.BinaryUnmarshalerNone, + }.DecMode() + if err != nil { + panic(err) + } + return decode +}() + +// DecodeLax is derived from Decode, but does not complain about unknown fields in the input. +var DecodeLax cbor.DecMode = func() cbor.DecMode { + opts := Decode.DecOptions() + opts.ExtraReturnErrors &^= cbor.ExtraDecErrorUnknownField // clear bit + dm, err := opts.DecMode() + if err != nil { + panic(err) + } + return dm +}() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go new file mode 100644 index 0000000000..61f3f145f5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "github.com/fxamacker/cbor/v2" +) + +var Diagnostic cbor.DiagMode = func() cbor.DiagMode { + opts := Decode.DecOptions() + diagnostic, err := cbor.DiagOptions{ + ByteStringText: true, + + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.DiagMode() + if err != nil { + panic(err) + } + return diagnostic +}() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go new file mode 100644 index 0000000000..c669313844 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go @@ -0,0 +1,155 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package modes + +import ( + "io" + + "github.com/fxamacker/cbor/v2" +) + +var Encode = EncMode{ + delegate: func() cbor.UserBufferEncMode { + encode, err := cbor.EncOptions{ + // Map keys need to be sorted to have deterministic output, and this is the order + // defined in RFC 8949 4.2.1 "Core Deterministic Encoding Requirements". + Sort: cbor.SortBytewiseLexical, + + // CBOR supports distinct types for IEEE-754 float16, float32, and float64. Store + // floats in the smallest width that preserves value so that equivalent float32 and + // float64 values encode to identical bytes, as they do in a JSON + // encoding. Satisfies one of the "Core Deterministic Encoding Requirements". + ShortestFloat: cbor.ShortestFloat16, + + // Error on attempt to encode NaN and infinite values. This is what the JSON + // serializer does. + NaNConvert: cbor.NaNConvertReject, + InfConvert: cbor.InfConvertReject, + + // Error on attempt to encode math/big.Int values, which can't be faithfully + // roundtripped through Unstructured in general (the dynamic numeric types allowed + // in Unstructured are limited to float64 and int64). + BigIntConvert: cbor.BigIntConvertReject, + + // MarshalJSON for time.Time writes RFC3339 with nanos. + Time: cbor.TimeRFC3339Nano, + + // The decoder must be able to accept RFC3339 strings with or without tag 0 (e.g. by + // the end of time.Time -> JSON -> Unstructured -> CBOR, the CBOR encoder has no + // reliable way of knowing that a particular string originated from serializing a + // time.Time), so producing tag 0 has little use. + TimeTag: cbor.EncTagNone, + + // Indefinite-length items have multiple encodings and aren't being used anyway, so + // disable to avoid an opportunity for nondeterminism. + IndefLength: cbor.IndefLengthForbidden, + + // Preserve distinction between nil and empty for slices and maps. + NilContainers: cbor.NilContainerAsNull, + + // OK to produce tags. + TagsMd: cbor.TagsAllowed, + + // Use the same definition of "empty" as encoding/json. + OmitEmpty: cbor.OmitEmptyGoValue, + + // The CBOR types text string and byte string are structurally equivalent, with the + // semantic difference that a text string whose content is an invalid UTF-8 sequence + // is itself invalid. We reject all invalid text strings at decode time and do not + // validate or sanitize all Go strings at encode time. Encoding Go strings to the + // byte string type is comparable to the existing Protobuf behavior and cheaply + // ensures that the output is valid CBOR. + String: cbor.StringToByteString, + + // Encode struct field names to the byte string type rather than the text string + // type. + FieldName: cbor.FieldNameToByteString, + + // Marshal Go byte arrays to CBOR arrays of integers (as in JSON) instead of byte + // strings. + ByteArray: cbor.ByteArrayToArray, + + // Marshal []byte to CBOR byte string enclosed in tag 22 (expected later base64 + // encoding, https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), to + // interoperate with the existing JSON behavior. This indicates to the decoder that, + // when decoding into a string (or unstructured), the resulting value should be the + // base64 encoding of the original bytes. No base64 encoding or decoding needs to be + // performed for []byte-to-CBOR-to-[]byte roundtrips. + ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64, + + // Disable default recognition of types implementing encoding.BinaryMarshaler, which + // is not recognized for JSON encoding. 
+ BinaryMarshaler: cbor.BinaryMarshalerNone, + }.UserBufferEncMode() + if err != nil { + panic(err) + } + return encode + }(), +} + +var EncodeNondeterministic = EncMode{ + delegate: func() cbor.UserBufferEncMode { + opts := Encode.options() + opts.Sort = cbor.SortNone // TODO: Use cbor.SortFastShuffle after bump to v2.7.0. + em, err := opts.UserBufferEncMode() + if err != nil { + panic(err) + } + return em + }(), +} + +type EncMode struct { + delegate cbor.UserBufferEncMode +} + +func (em EncMode) options() cbor.EncOptions { + return em.delegate.EncOptions() +} + +func (em EncMode) MarshalTo(v interface{}, w io.Writer) error { + if buf, ok := w.(*buffer); ok { + return em.delegate.MarshalToBuffer(v, &buf.Buffer) + } + + buf := buffers.Get() + defer buffers.Put(buf) + if err := em.delegate.MarshalToBuffer(v, &buf.Buffer); err != nil { + return err + } + + if _, err := io.Copy(w, buf); err != nil { + return err + } + + return nil +} + +func (em EncMode) Marshal(v interface{}) ([]byte, error) { + buf := buffers.Get() + defer buffers.Put(buf) + + if err := em.MarshalTo(v, &buf.Buffer); err != nil { + return nil, err + } + + clone := make([]byte, buf.Len()) + copy(clone, buf.Bytes()) + + return clone, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index ce77c7910a..1680c149f9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -46,6 +46,7 @@ const ( ContentTypeJSON string = "application/json" ContentTypeYAML string = "application/yaml" ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" + ContentTypeCBOR string = "application/cbor" ) // RawExtension is used to hold extensions in external versions. diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 9b3c9c8d5a..1ab8fd396e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -147,7 +147,6 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see // data written to data, or be larger than data and a different array. - n := len(data) m := json.RawMessage(data[:0]) if err := r.decoder.Decode(&m); err != nil { return 0, err @@ -156,12 +155,19 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // If capacity of data is less than length of the message, decoder will allocate a new slice // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. - if len(m) > n { - //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. - data = append(data[0:0], m[:n]...) - r.remaining = m[n:] - return n, io.ErrShortBuffer + if len(m) > cap(data) { + copy(data, m) + r.remaining = m[len(data):] + return len(data), io.ErrShortBuffer } + + if len(m) > len(data) { + // The bytes beyond len(data) were stored in data's underlying array, which we do + // not own after this function returns. + r.remaining = append([]byte(nil), m[len(data):]...) 
+ return len(data), io.ErrShortBuffer + } + return len(m), nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go index a32fce5a0c..8054b98676 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -116,6 +116,15 @@ func IsUpgradeFailure(err error) bool { return errors.As(err, &upgradeErr) } +// isHTTPSProxyError returns true if error is Gorilla/Websockets HTTPS Proxy dial error; +// false otherwise (see https://github.com/kubernetes/kubernetes/issues/126134). +func IsHTTPSProxyError(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), "proxy: unknown scheme: https") +} + // IsUpgradeRequest returns true if the given request is a connection upgrade request func IsUpgradeRequest(req *http.Request) bool { for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] { diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index f358c794d1..5fd2e16c84 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,6 +25,7 @@ import ( "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/klog/v2" ) @@ -92,6 +93,20 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error { return json.Unmarshal(value, &intstr.IntVal) } +func (intstr *IntOrString) UnmarshalCBOR(value []byte) error { + if err := cbor.Unmarshal(value, &intstr.StrVal); err == nil { + intstr.Type = String + return nil + } + + if err := cbor.Unmarshal(value, &intstr.IntVal); err != nil { + return err + } + + intstr.Type = Int + return nil +} + // String returns the string value, or the Itoa of the int value. func (intstr *IntOrString) String() string { if intstr == nil { @@ -126,6 +141,17 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { } } +func (intstr IntOrString) MarshalCBOR() ([]byte, error) { + switch intstr.Type { + case Int: + return cbor.Marshal(intstr.IntVal) + case String: + return cbor.Marshal(intstr.StrVal) + default: + return nil, fmt.Errorf("impossible IntOrString.Type") + } +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. // diff --git a/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go index c480ffacbc..786c16e27a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go @@ -20,7 +20,7 @@ import ( "fmt" "reflect" - "github.com/evanphx/json-patch" + "gopkg.in/evanphx/json-patch.v4" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/mergepatch" ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 3674914f70..4fe0c5eb25 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -17,6 +17,7 @@ limitations under the License. package runtime import ( + "context" "fmt" "net/http" "runtime" @@ -35,7 +36,7 @@ var ( ) // PanicHandlers is a list of functions which will be invoked when a panic happens. 
-var PanicHandlers = []func(interface{}){logPanic} +var PanicHandlers = []func(context.Context, interface{}){logPanic} // HandleCrash simply catches a crash and logs an error. Meant to be called via // defer. Additional context-specific handlers can be provided, and will be @@ -43,23 +44,54 @@ var PanicHandlers = []func(interface{}){logPanic} // handlers and logging the panic message. // // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { - for _, fn := range PanicHandlers { - fn(r) - } - for _, fn := range additionalHandlers { - fn(r) - } - if ReallyCrash { - // Actually proceed to panic. - panic(r) + additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers)) + for i, handler := range additionalHandlers { + handler := handler // capture loop variable + additionalHandlersWithContext[i] = func(_ context.Context, r interface{}) { + handler(r) + } } + + handleCrash(context.Background(), r, additionalHandlersWithContext...) + } +} + +// HandleCrashWithContext simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrash actually crashes, after calling the +// handlers and logging the panic message. +// +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// The context is used to determine how to log. +func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(context.Context, interface{})) { + if r := recover(); r != nil { + handleCrash(ctx, r, additionalHandlers...) + } +} + +// handleCrash is the common implementation of HandleCrash and HandleCrash. +// Having those call a common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. +func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) { + for _, fn := range PanicHandlers { + fn(ctx, r) + } + for _, fn := range additionalHandlers { + fn(ctx, r) + } + if ReallyCrash { + // Actually proceed to panic. + panic(r) } } // logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). -func logPanic(r interface{}) { +func logPanic(ctx context.Context, r interface{}) { if r == http.ErrAbortHandler { // honor the http.ErrAbortHandler sentinel panic value: // ErrAbortHandler is a sentinel panic value to abort a handler. @@ -73,10 +105,20 @@ func logPanic(r interface{}) { const size = 64 << 10 stacktrace := make([]byte, size) stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + + // We don't really know how many call frames to skip because the Go + // panic handler is between us and the code where the panic occurred. + // If it's one function (as in Go 1.21), then skipping four levels + // gets us to the function which called the `defer HandleCrashWithontext(...)`. + logger := klog.FromContext(ctx).WithCallDepth(4) + + // For backwards compatibility, conversion to string + // is handled here instead of defering to the logging + // backend. 
if _, ok := r.(string); ok { - klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", r, "stacktrace", string(stacktrace)) } else { - klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", fmt.Sprintf("%v", r), "panicGoValue", fmt.Sprintf("%#v", r), "stacktrace", string(stacktrace)) } } @@ -84,35 +126,76 @@ func logPanic(r interface{}) { // error occurs. // TODO(lavalamp): for testability, this and the below HandleError function // should be packaged up into a testable and reusable object. -var ErrorHandlers = []func(error){ +var ErrorHandlers = []ErrorHandler{ logError, - (&rudimentaryErrorBackoff{ - lastErrorTime: time.Now(), - // 1ms was the number folks were able to stomach as a global rate limit. - // If you need to log errors more than 1000 times a second you - // should probably consider fixing your code instead. :) - minPeriod: time.Millisecond, - }).OnError, + func(_ context.Context, _ error, _ string, _ ...interface{}) { + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError() + }, } +type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{}) + // HandlerError is a method to invoke when a non-user facing piece of code cannot // return an error and needs to indicate it has been ignored. Invoking this method // is preferable to logging the error - the default behavior is to log but the // errors may be sent to a remote server for analysis. +// +// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging. func HandleError(err error) { // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead if err == nil { return } + handleError(context.Background(), err, "Unhandled Error") +} + +// HandlerErrorWithContext is a method to invoke when a non-user facing piece of code cannot +// return an error and needs to indicate it has been ignored. Invoking this method +// is preferable to logging the error - the default behavior is to log but the +// errors may be sent to a remote server for analysis. The context is used to +// determine how to log the error. +// +// If contextual logging is enabled, the default log output is equivalent to +// +// logr.FromContext(ctx).WithName("UnhandledError").Error(err, msg, keysAndValues...) +// +// Without contextual logging, it is equivalent to: +// +// klog.ErrorS(err, msg, keysAndValues...) +// +// In contrast to HandleError, passing nil for the error is still going to +// trigger a log entry. Don't construct a new error or wrap an error +// with fmt.Errorf. Instead, add additional information via the mssage +// and key/value pairs. +// +// This variant should be used instead of HandleError because it supports +// structured, contextual logging. +func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + handleError(ctx, err, msg, keysAndValues...) +} + +// handleError is the common implementation of HandleError and HandleErrorWithContext. +// Using this common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. 
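// A short usage sketch for the new context-aware helpers introduced above
// (illustrative only, not part of the vendored file); the import path is the
// vendored k8s.io/apimachinery/pkg/util/runtime and doWork is a placeholder.
//
// package main
//
// import (
// 	"context"
// 	"errors"
//
// 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
// )
//
// func doWork() error { return errors.New("boom") }
//
// func reconcile(ctx context.Context) {
// 	// Logs panics through the logger carried by ctx, then re-panics when
// 	// utilruntime.ReallyCrash is true (the default).
// 	defer utilruntime.HandleCrashWithContext(ctx)
//
// 	if err := doWork(); err != nil {
// 		// Unlike HandleError, the message and key/value pairs are logged
// 		// even when err is nil.
// 		utilruntime.HandleErrorWithContext(ctx, err, "work failed", "step", "doWork")
// 	}
// }
//
// func main() { reconcile(context.Background()) }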
+func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { for _, fn := range ErrorHandlers { - fn(err) + fn(ctx, err, msg, keysAndValues...) } } -// logError prints an error with the call stack of the location it was reported -func logError(err error) { - klog.ErrorDepth(2, err) +// logError prints an error with the call stack of the location it was reported. +// It expects to be called as -> HandleError[WithContext] -> handleError -> logError. +func logError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + logger := klog.FromContext(ctx).WithCallDepth(3) + logger = klog.LoggerWithName(logger, "UnhandledError") + logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs. } type rudimentaryErrorBackoff struct { @@ -125,7 +208,7 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. -func (r *rudimentaryErrorBackoff) OnError(error) { +func (r *rudimentaryErrorBackoff) OnError() { now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() d := now.Sub(r.lastErrorTime) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go index b76129a1ca..cd961c8c59 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go @@ -68,14 +68,8 @@ func (s Set[T]) Delete(items ...T) Set[T] { // Clear empties the set. // It is preferable to replace the set with a newly constructed set, // but not all callers can do that (when there are other references to the map). -// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN -// can't be cleared because NaN can't be removed. -// For sets containing items of a type that is reflexive for ==, -// this is optimized to a single call to runtime.mapclear(). func (s Set[T]) Clear() Set[T] { - for key := range s { - delete(s, key) - } + clear(s) return s } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 920c113bbd..6825a808e6 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1361,6 +1361,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me // original. Otherwise, check if we want to preserve it or skip it. // Preserving the null value is useful when we want to send an explicit // delete to the API server. + // In some cases, this may lead to inconsistent behavior with create. + // ref: https://github.com/kubernetes/kubernetes/issues/123304 + // To avoid breaking compatibility, + // we made corresponding changes on the client side to ensure that the create and patch behaviors are idempotent. 
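// An illustrative sketch of the explicit-null delete semantics referred to in
// the comment above, shown with the json-patch module this diff switches to
// (gopkg.in/evanphx/json-patch.v4); not part of the vendored file, and the
// document contents are arbitrary.
//
// package main
//
// import (
// 	"fmt"
//
// 	jsonpatch "gopkg.in/evanphx/json-patch.v4"
// )
//
// func main() {
// 	original := []byte(`{"metadata":{"annotations":{"keep":"a","drop":"b"}}}`)
// 	// In a merge patch, a null value is an instruction to delete the key.
// 	patch := []byte(`{"metadata":{"annotations":{"drop":null}}}`)
//
// 	merged, err := jsonpatch.MergePatch(original, patch)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(string(merged)) // annotations now contain only "keep"
// }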
if patchV == nil { delete(original, k) if mergeOptions.IgnoreUnmatchedNulls { diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go index 2292ba1376..b7812ff2d1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -23,6 +23,8 @@ import ( "regexp" "strconv" "strings" + + apimachineryversion "k8s.io/apimachinery/pkg/version" ) // Version is an opaque representation of a version number @@ -31,6 +33,7 @@ type Version struct { semver bool preRelease string buildMetadata string + info apimachineryversion.Info } var ( @@ -145,6 +148,43 @@ func MustParseGeneric(str string) *Version { return v } +// Parse tries to do ParseSemantic first to keep more information. +// If ParseSemantic fails, it would just do ParseGeneric. +func Parse(str string) (*Version, error) { + v, err := parse(str, true) + if err != nil { + return parse(str, false) + } + return v, err +} + +// MustParse is like Parse except that it panics on error +func MustParse(str string) *Version { + v, err := Parse(str) + if err != nil { + panic(err) + } + return v +} + +// ParseMajorMinor parses a "generic" version string and returns a version with the major and minor version. +func ParseMajorMinor(str string) (*Version, error) { + v, err := ParseGeneric(str) + if err != nil { + return nil, err + } + return MajorMinor(v.Major(), v.Minor()), nil +} + +// MustParseMajorMinor is like ParseMajorMinor except that it panics on error +func MustParseMajorMinor(str string) *Version { + v, err := ParseMajorMinor(str) + if err != nil { + panic(err) + } + return v +} + // ParseSemantic parses a version string that exactly obeys the syntax and semantics of // the "Semantic Versioning" specification (http://semver.org/) (although it ignores // leading and trailing whitespace, and allows the version to be preceded by "v"). For @@ -215,6 +255,32 @@ func (v *Version) WithMinor(minor uint) *Version { return &result } +// SubtractMinor returns the version with offset from the original minor, with the same major and no patch. +// If -offset >= current minor, the minor would be 0. +func (v *Version) OffsetMinor(offset int) *Version { + var minor uint + if offset >= 0 { + minor = v.Minor() + uint(offset) + } else { + diff := uint(-offset) + if diff < v.Minor() { + minor = v.Minor() - diff + } + } + return MajorMinor(v.Major(), minor) +} + +// SubtractMinor returns the version diff minor versions back, with the same major and no patch. +// If diff >= current minor, the minor would be 0. +func (v *Version) SubtractMinor(diff uint) *Version { + return v.OffsetMinor(-int(diff)) +} + +// AddMinor returns the version diff minor versions forward, with the same major and no patch. +func (v *Version) AddMinor(diff uint) *Version { + return v.OffsetMinor(int(diff)) +} + // WithPatch returns copy of the version object with requested patch number func (v *Version) WithPatch(patch uint) *Version { result := *v @@ -224,6 +290,9 @@ func (v *Version) WithPatch(patch uint) *Version { // WithPreRelease returns copy of the version object with requested prerelease func (v *Version) WithPreRelease(preRelease string) *Version { + if len(preRelease) == 0 { + return v + } result := *v result.components = []uint{v.Major(), v.Minor(), v.Patch()} result.preRelease = preRelease @@ -345,6 +414,17 @@ func onlyZeros(array []uint) bool { return true } +// EqualTo tests if a version is equal to a given version. 
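// A brief usage sketch for the helpers added to
// k8s.io/apimachinery/pkg/util/version in this bump (illustrative only, not
// part of the vendored file); the version strings are arbitrary.
//
// package main
//
// import (
// 	"fmt"
//
// 	"k8s.io/apimachinery/pkg/util/version"
// )
//
// func main() {
// 	v := version.MustParse("v1.31.1") // semantic parse first, generic as a fallback
//
// 	fmt.Println(v.EqualTo(version.MustParse("1.31.1")))   // true
// 	fmt.Println(v.GreaterThan(version.MustParse("1.30"))) // true
// 	fmt.Println(v.SubtractMinor(3))                       // 1.28 (same major, no patch)
// 	fmt.Println(v.OffsetMinor(-40))                       // 1.0 (minor is clamped at zero)
// }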
+func (v *Version) EqualTo(other *Version) bool { + if v == nil { + return other == nil + } + if other == nil { + return false + } + return v.compareInternal(other) == 0 +} + // AtLeast tests if a version is at least equal to a given minimum version. If both // Versions are Semantic Versions, this will use the Semantic Version comparison // algorithm. Otherwise, it will compare only the numeric components, with non-present @@ -360,6 +440,11 @@ func (v *Version) LessThan(other *Version) bool { return v.compareInternal(other) == -1 } +// GreaterThan tests if a version is greater than a given version. +func (v *Version) GreaterThan(other *Version) bool { + return v.compareInternal(other) == 1 +} + // Compare compares v against a version string (which will be parsed as either Semantic // or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if // it is greater than other, or 0 if they are equal. @@ -370,3 +455,30 @@ func (v *Version) Compare(other string) (int, error) { } return v.compareInternal(ov), nil } + +// WithInfo returns copy of the version object with requested info +func (v *Version) WithInfo(info apimachineryversion.Info) *Version { + result := *v + result.info = info + return &result +} + +func (v *Version) Info() *apimachineryversion.Info { + if v == nil { + return nil + } + // in case info is empty, or the major and minor in info is different from the actual major and minor + v.info.Major = itoa(v.Major()) + v.info.Minor = itoa(v.Minor()) + if v.info.GitVersion == "" { + v.info.GitVersion = v.String() + } + return &v.info +} + +func itoa(i uint) string { + if i == 0 { + return "" + } + return strconv.Itoa(int(i)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index b6c7bbfa8f..ce37fd8c18 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -27,13 +27,25 @@ import ( // Interface can be implemented by anything that knows how to watch and report changes. type Interface interface { - // Stop stops watching. Will close the channel returned by ResultChan(). Releases - // any resources used by the watch. + // Stop tells the producer that the consumer is done watching, so the + // producer should stop sending events and close the result channel. The + // consumer should keep watching for events until the result channel is + // closed. + // + // Because some implementations may create channels when constructed, Stop + // must always be called, even if the consumer has not yet called + // ResultChan(). + // + // Only the consumer should call Stop(), not the producer. If the producer + // errors and needs to stop the watch prematurely, it should instead send + // an error event and close the result channel. Stop() - // ResultChan returns a chan which will receive all the events. If an error occurs - // or Stop() is called, the implementation will close this channel and - // release any resources used by the watch. + // ResultChan returns a channel which will receive events from the event + // producer. If an error occurs or Stop() is called, the producer must + // close this channel and release any resources used by the watch. + // Closing the result channel tells the consumer that no more events will be + // sent. 
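// A consumer-side sketch of the Stop/ResultChan contract documented above,
// using the MockWatcher type added further down in this file (illustrative
// only, not part of the vendored file): drain ResultChan until it is closed
// and always call Stop, even on early return.
//
// package main
//
// import (
// 	"fmt"
//
// 	"k8s.io/apimachinery/pkg/watch"
// )
//
// func main() {
// 	events := make(chan watch.Event, 1)
// 	events <- watch.Event{Type: watch.Added}
// 	close(events) // the producer closes the channel when it is done
//
// 	w := watch.MockWatcher{
// 		StopFunc:       func() {},
// 		ResultChanFunc: func() <-chan watch.Event { return events },
// 	}
// 	defer w.Stop() // always stop, even if no events are ever read
//
// 	for ev := range w.ResultChan() {
// 		fmt.Println(ev.Type) // ADDED
// 	}
// }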
ResultChan() <-chan Event } @@ -322,3 +334,21 @@ func (pw *ProxyWatcher) ResultChan() <-chan Event { func (pw *ProxyWatcher) StopChan() <-chan struct{} { return pw.stopCh } + +// MockWatcher implements watch.Interface with mockable functions. +type MockWatcher struct { + StopFunc func() + ResultChanFunc func() <-chan Event +} + +var _ Interface = &MockWatcher{} + +// Stop calls StopFunc +func (mw MockWatcher) Stop() { + mw.StopFunc() +} + +// ResultChan calls ResultChanFunc +func (mw MockWatcher) ResultChan() <-chan Event { + return mw.ResultChanFunc() +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go index bb5e233d45..06035f6b9e 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go @@ -163,11 +163,12 @@ type variableDeclEnvs map[OptionalVariableDeclarations]*environment.EnvSet // CompileCELExpression returns a compiled CEL expression. // perCallLimit was added for testing purpose only. Callers should always use const PerCallLimit from k8s.io/apiserver/pkg/apis/cel/config.go as input. func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, options OptionalVariableDeclarations, envType environment.Type) CompilationResult { - resultError := func(errorString string, errType apiservercel.ErrorType) CompilationResult { + resultError := func(errorString string, errType apiservercel.ErrorType, cause error) CompilationResult { return CompilationResult{ Error: &apiservercel.Error{ Type: errType, Detail: errorString, + Cause: cause, }, ExpressionAccessor: expressionAccessor, } @@ -175,12 +176,12 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, op env, err := c.varEnvs[options].Env(envType) if err != nil { - return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal) + return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal, nil) } ast, issues := env.Compile(expressionAccessor.GetExpression()) if issues != nil { - return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid) + return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid, apiservercel.NewCompilationError(issues)) } found := false returnTypes := expressionAccessor.ReturnTypes() @@ -198,19 +199,19 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, op reason = fmt.Sprintf("must evaluate to one of %v", returnTypes) } - return resultError(reason, apiservercel.ErrorTypeInvalid) + return resultError(reason, apiservercel.ErrorTypeInvalid, nil) } _, err = cel.AstToCheckedExpr(ast) if err != nil { // should be impossible since env.Compile returned no issues - return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) + return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal, nil) } prog, err := env.Program(ast, cel.InterruptCheckFrequency(celconfig.CheckFrequency), ) if err != nil { - return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal) + return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal, nil) } return CompilationResult{ Program: prog, diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go 
b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go index 3e2a63e75c..216a474d23 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go @@ -192,6 +192,7 @@ func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.Versione evaluation.Error = &cel.Error{ Type: cel.ErrorTypeInvalid, Detail: fmt.Sprintf("compilation error: %v", compilationResult.Error), + Cause: compilationResult.Error, } continue } @@ -211,6 +212,7 @@ func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.Versione return nil, -1, &cel.Error{ Type: cel.ErrorTypeInvalid, Detail: fmt.Sprintf("validation failed due to running out of cost budget, no further validation rules will be run"), + Cause: cel.ErrOutOfBudget, } } remainingBudget -= compositionCost @@ -228,12 +230,14 @@ func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.Versione return nil, -1, &cel.Error{ Type: cel.ErrorTypeInvalid, Detail: fmt.Sprintf("runtime cost could not be calculated for expression: %v, no further expression will be run", compilationResult.ExpressionAccessor.GetExpression()), + Cause: cel.ErrOutOfBudget, } } else { if *rtCost > math.MaxInt64 || int64(*rtCost) > remainingBudget { return nil, -1, &cel.Error{ Type: cel.ErrorTypeInvalid, Detail: fmt.Sprintf("validation failed due to running out of cost budget, no further validation rules will be run"), + Cause: cel.ErrOutOfBudget, } } remainingBudget -= int64(*rtCost) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/controller.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/controller.go index 4334c0dd82..f08e1cc559 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/controller.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/controller.go @@ -40,7 +40,7 @@ var _ Controller[runtime.Object] = &controller[runtime.Object]{} type controller[T runtime.Object] struct { informer Informer[T] - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // Returns an error if there was a transient error during reconciliation // and the object should be tried again later. @@ -99,7 +99,10 @@ func (c *controller[T]) Run(ctx context.Context) error { klog.Infof("starting %s", c.options.Name) defer klog.Infof("stopping %s", c.options.Name) - c.queue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), c.options.Name) + c.queue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: c.options.Name}, + ) // Forcefully shutdown workqueue. Drop any enqueued items. // Important to do this in a `defer` at the start of `Run`. @@ -219,7 +222,7 @@ func (c *controller[T]) runWorker() { } // We wrap this block in a func so we can defer c.workqueue.Done. - err := func(obj interface{}) error { + err := func(obj string) error { // We call Done here so the workqueue knows we have finished // processing this item. We also must remember to call Forget if we // do not want this work item being re-queued. For example, we do @@ -227,19 +230,6 @@ func (c *controller[T]) runWorker() { // put back on the workqueue and attempted again after a back-off // period. defer c.queue.Done(obj) - var key string - var ok bool - // We expect strings to come off the workqueue. These are of the - // form namespace/name. 
We do this as the delayed nature of the - // workqueue means the items in the informer cache may actually be - // more up to date that when the item was initially put onto the - // workqueue. - if key, ok = obj.(string); !ok { - // How did an incorrectly formatted key get in the workqueue? - // Done is sufficient. (Forget resets rate limiter for the key, - // but the key is invalid so there is no point in doing that) - return fmt.Errorf("expected string in workqueue but got %#v", obj) - } defer c.hasProcessed.Finished(key) if err := c.reconcile(key); err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/caching_authorizer.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/caching_authorizer.go index fbefd595e5..ac13dbeee3 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/caching_authorizer.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/caching_authorizer.go @@ -22,6 +22,8 @@ import ( "sort" "strings" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" ) @@ -58,6 +60,8 @@ var _ authorizer.Attributes = (interface { GetAPIVersion() string IsResourceRequest() bool GetPath() string + GetFieldSelector() (fields.Requirements, error) + GetLabelSelector() (labels.Requirements, error) })(nil) // The user info accessors known to cache key construction. If this fails to compile, the cache @@ -72,16 +76,31 @@ var _ user.Info = (interface { // Authorize returns an authorization decision by delegating to another Authorizer. If an equivalent // check has already been performed, a cached result is returned. Not safe for concurrent use. func (ca *cachingAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) { - serializableAttributes := authorizer.AttributesRecord{ - Verb: a.GetVerb(), - Namespace: a.GetNamespace(), - APIGroup: a.GetAPIGroup(), - APIVersion: a.GetAPIVersion(), - Resource: a.GetResource(), - Subresource: a.GetSubresource(), - Name: a.GetName(), - ResourceRequest: a.IsResourceRequest(), - Path: a.GetPath(), + type SerializableAttributes struct { + authorizer.AttributesRecord + LabelSelector string + } + + serializableAttributes := SerializableAttributes{ + AttributesRecord: authorizer.AttributesRecord{ + Verb: a.GetVerb(), + Namespace: a.GetNamespace(), + APIGroup: a.GetAPIGroup(), + APIVersion: a.GetAPIVersion(), + Resource: a.GetResource(), + Subresource: a.GetSubresource(), + Name: a.GetName(), + ResourceRequest: a.IsResourceRequest(), + Path: a.GetPath(), + }, + } + // in the error case, we won't honor this field selector, so the cache doesn't need it. 
+ if fieldSelector, err := a.GetFieldSelector(); len(fieldSelector) > 0 { + serializableAttributes.FieldSelectorRequirements, serializableAttributes.FieldSelectorParsingErr = fieldSelector, err + } + if labelSelector, _ := a.GetLabelSelector(); len(labelSelector) > 0 { + // the labels requirements have private elements so those don't help us serialize to a unique key + serializableAttributes.LabelSelector = labelSelector.String() } if u := a.GetUser(); u != nil { diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/dispatcher.go index edf5a78867..f060114253 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/dispatcher.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/dispatcher.go @@ -223,7 +223,7 @@ func (c *dispatcher) Dispatch(ctx context.Context, a admission.Attributes, o adm switch decision.Action { case ActionAdmit: if decision.Evaluation == EvalError { - celmetrics.Metrics.ObserveAdmissionWithError(ctx, decision.Elapsed, definition.Name, binding.Name, "active") + celmetrics.Metrics.ObserveAdmission(ctx, decision.Elapsed, definition.Name, binding.Name, ErrorType(&decision)) } case ActionDeny: for _, action := range binding.Spec.ValidationActions { @@ -234,13 +234,13 @@ func (c *dispatcher) Dispatch(ctx context.Context, a admission.Attributes, o adm Binding: binding, PolicyDecision: decision, }) - celmetrics.Metrics.ObserveRejection(ctx, decision.Elapsed, definition.Name, binding.Name, "active") + celmetrics.Metrics.ObserveRejection(ctx, decision.Elapsed, definition.Name, binding.Name, ErrorType(&decision)) case admissionregistrationv1.Audit: publishValidationFailureAnnotation(binding, i, decision, versionedAttr) - celmetrics.Metrics.ObserveAudit(ctx, decision.Elapsed, definition.Name, binding.Name, "active") + celmetrics.Metrics.ObserveAudit(ctx, decision.Elapsed, definition.Name, binding.Name, ErrorType(&decision)) case admissionregistrationv1.Warn: warning.AddWarning(ctx, "", fmt.Sprintf("Validation failed for ValidatingAdmissionPolicy '%s' with binding '%s': %s", definition.Name, binding.Name, decision.Message)) - celmetrics.Metrics.ObserveWarn(ctx, decision.Elapsed, definition.Name, binding.Name, "active") + celmetrics.Metrics.ObserveWarn(ctx, decision.Elapsed, definition.Name, binding.Name, ErrorType(&decision)) } } default: @@ -259,7 +259,7 @@ func (c *dispatcher) Dispatch(ctx context.Context, a admission.Attributes, o adm auditAnnotationCollector.add(auditAnnotation.Key, value) case AuditAnnotationActionError: // When failurePolicy=fail, audit annotation errors result in deny - deniedDecisions = append(deniedDecisions, policyDecisionWithMetadata{ + d := policyDecisionWithMetadata{ Definition: definition, Binding: binding, PolicyDecision: PolicyDecision{ @@ -268,8 +268,9 @@ func (c *dispatcher) Dispatch(ctx context.Context, a admission.Attributes, o adm Message: auditAnnotation.Error, Elapsed: auditAnnotation.Elapsed, }, - }) - celmetrics.Metrics.ObserveRejection(ctx, auditAnnotation.Elapsed, definition.Name, binding.Name, "active") + } + deniedDecisions = append(deniedDecisions, d) + celmetrics.Metrics.ObserveRejection(ctx, auditAnnotation.Elapsed, definition.Name, binding.Name, ErrorType(&d.PolicyDecision)) case AuditAnnotationActionExclude: // skip it default: return fmt.Errorf("unsupported AuditAnnotation Action: %s", auditAnnotation.Action) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/errors.go 
b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/errors.go new file mode 100644 index 0000000000..0fcf40ff0d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/errors.go @@ -0,0 +1,38 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validating + +import ( + "strings" + + celmetrics "k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics" +) + +// ErrorType decodes the error to determine the error type +// that the metrics understand. +func ErrorType(decision *PolicyDecision) celmetrics.ValidationErrorType { + if decision.Evaluation == EvalAdmit { + return celmetrics.ValidationNoError + } + if strings.HasPrefix(decision.Message, "compilation") { + return celmetrics.ValidationCompileError + } + if strings.HasPrefix(decision.Message, "validation failed due to running out of cost budget") { + return celmetrics.ValidatingOutOfBudget + } + return celmetrics.ValidatingInvalidError +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/errors.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/errors.go new file mode 100644 index 0000000000..4327252610 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/errors.go @@ -0,0 +1,38 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "errors" + + apiservercel "k8s.io/apiserver/pkg/cel" +) + +// ErrorType decodes the error to determine the error type +// that the metrics understand. 
+func ErrorType(err error) ValidationErrorType { + if err == nil { + return ValidationNoError + } + if errors.Is(err, apiservercel.ErrCompilation) { + return ValidationCompileError + } + if errors.Is(err, apiservercel.ErrOutOfBudget) { + return ValidatingOutOfBudget + } + return ValidatingInvalidError +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/metrics.go index 9f8a941105..92c8ee2b85 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/metrics.go @@ -29,6 +29,22 @@ const ( metricsSubsystem = "validating_admission_policy" ) +// ValidationErrorType defines different error types that happen to a validation expression +type ValidationErrorType string + +const ( + // ValidationCompileError indicates that the expression fails to compile. + ValidationCompileError ValidationErrorType = "compile_error" + // ValidatingInvalidError indicates that the expression fails due to internal + // errors that are out of the control of the user. + ValidatingInvalidError ValidationErrorType = "invalid_error" + // ValidatingOutOfBudget indicates that the expression fails due to running + // out of cost budget, or the budget cannot be obtained. + ValidatingOutOfBudget ValidationErrorType = "out_of_budget" + // ValidationNoError indicates that the expression returns without an error. + ValidationNoError ValidationErrorType = "no_error" +) + var ( // Metrics provides access to validation admission metrics. Metrics = newValidationAdmissionMetrics() @@ -36,9 +52,8 @@ var ( // ValidatingAdmissionPolicyMetrics aggregates Prometheus metrics related to validation admission control. 
type ValidatingAdmissionPolicyMetrics struct { - policyCheck *metrics.CounterVec - policyDefinition *metrics.CounterVec - policyLatency *metrics.HistogramVec + policyCheck *metrics.CounterVec + policyLatency *metrics.HistogramVec } func newValidationAdmissionMetrics() *ValidatingAdmissionPolicyMetrics { @@ -47,25 +62,16 @@ func newValidationAdmissionMetrics() *ValidatingAdmissionPolicyMetrics { Namespace: metricsNamespace, Subsystem: metricsSubsystem, Name: "check_total", - Help: "Validation admission policy check total, labeled by policy and further identified by binding, enforcement action taken, and state.", - StabilityLevel: metrics.ALPHA, + Help: "Validation admission policy check total, labeled by policy and further identified by binding and enforcement action taken.", + StabilityLevel: metrics.BETA, }, - []string{"policy", "policy_binding", "enforcement_action", "state"}, - ) - definition := metrics.NewCounterVec(&metrics.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: metricsSubsystem, - Name: "definition_total", - Help: "Validation admission policy count total, labeled by state and enforcement action.", - StabilityLevel: metrics.ALPHA, - }, - []string{"state", "enforcement_action"}, + []string{"policy", "policy_binding", "error_type", "enforcement_action"}, ) latency := metrics.NewHistogramVec(&metrics.HistogramOpts{ Namespace: metricsNamespace, Subsystem: metricsSubsystem, Name: "check_duration_seconds", - Help: "Validation admission latency for individual validation expressions in seconds, labeled by policy and further including binding, state and enforcement action taken.", + Help: "Validation admission latency for individual validation expressions in seconds, labeled by policy and further including binding and enforcement action taken.", // the bucket distribution here is based oo the benchmark suite at // github.com/DangerOnTheRanger/cel-benchmark performed on 16-core Intel Xeon // the lowest bucket was based around the 180ns/op figure for BenchmarkAccess, @@ -75,49 +81,42 @@ func newValidationAdmissionMetrics() *ValidatingAdmissionPolicyMetrics { // around 760ms, so that bucket should only ever have the slowest CEL expressions // in it Buckets: []float64{0.0000005, 0.001, 0.01, 0.1, 1.0}, - StabilityLevel: metrics.ALPHA, + StabilityLevel: metrics.BETA, }, - []string{"policy", "policy_binding", "enforcement_action", "state"}, + []string{"policy", "policy_binding", "error_type", "enforcement_action"}, ) legacyregistry.MustRegister(check) - legacyregistry.MustRegister(definition) legacyregistry.MustRegister(latency) - return &ValidatingAdmissionPolicyMetrics{policyCheck: check, policyDefinition: definition, policyLatency: latency} + return &ValidatingAdmissionPolicyMetrics{policyCheck: check, policyLatency: latency} } // Reset resets all validation admission-related Prometheus metrics. func (m *ValidatingAdmissionPolicyMetrics) Reset() { m.policyCheck.Reset() - m.policyDefinition.Reset() m.policyLatency.Reset() } -// ObserveDefinition observes a policy definition. -func (m *ValidatingAdmissionPolicyMetrics) ObserveDefinition(ctx context.Context, state, enforcementAction string) { - m.policyDefinition.WithContext(ctx).WithLabelValues(state, enforcementAction).Inc() -} - -// ObserveAdmissionWithError observes a policy validation error that was ignored due to failure policy. 
-func (m *ValidatingAdmissionPolicyMetrics) ObserveAdmissionWithError(ctx context.Context, elapsed time.Duration, policy, binding, state string) { - m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "allow", state).Inc() - m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "allow", state).Observe(elapsed.Seconds()) +// ObserveAdmission observes a policy validation, with an optional error to indicate the error that may occur but ignored. +func (m *ValidatingAdmissionPolicyMetrics) ObserveAdmission(ctx context.Context, elapsed time.Duration, policy, binding string, errorType ValidationErrorType) { + m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "allow").Inc() + m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "allow").Observe(elapsed.Seconds()) } // ObserveRejection observes a policy validation error that was at least one of the reasons for a deny. -func (m *ValidatingAdmissionPolicyMetrics) ObserveRejection(ctx context.Context, elapsed time.Duration, policy, binding, state string) { - m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "deny", state).Inc() - m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "deny", state).Observe(elapsed.Seconds()) +func (m *ValidatingAdmissionPolicyMetrics) ObserveRejection(ctx context.Context, elapsed time.Duration, policy, binding string, errorType ValidationErrorType) { + m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "deny").Inc() + m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "deny").Observe(elapsed.Seconds()) } // ObserveAudit observes a policy validation audit annotation was published for a validation failure. -func (m *ValidatingAdmissionPolicyMetrics) ObserveAudit(ctx context.Context, elapsed time.Duration, policy, binding, state string) { - m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "audit", state).Inc() - m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "audit", state).Observe(elapsed.Seconds()) +func (m *ValidatingAdmissionPolicyMetrics) ObserveAudit(ctx context.Context, elapsed time.Duration, policy, binding string, errorType ValidationErrorType) { + m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "audit").Inc() + m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "audit").Observe(elapsed.Seconds()) } // ObserveWarn observes a policy validation warning was published for a validation failure. 
-func (m *ValidatingAdmissionPolicyMetrics) ObserveWarn(ctx context.Context, elapsed time.Duration, policy, binding, state string) { - m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "warn", state).Inc() - m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "warn", state).Observe(elapsed.Seconds()) +func (m *ValidatingAdmissionPolicyMetrics) ObserveWarn(ctx context.Context, elapsed time.Duration, policy, binding string, errorType ValidationErrorType) { + m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "warn").Inc() + m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "warn").Observe(elapsed.Seconds()) } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/plugin.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/plugin.go index fb097737a8..06f4a8c714 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/plugin.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/plugin.go @@ -19,6 +19,7 @@ package validating import ( "context" "io" + "sync" v1 "k8s.io/api/admissionregistration/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -44,23 +45,34 @@ const ( ) var ( - compositionEnvTemplateWithStrictCost *cel.CompositionEnv = func() *cel.CompositionEnv { - compositionEnvTemplateWithStrictCost, err := cel.NewCompositionEnv(cel.VariablesTypeName, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)) + lazyCompositionEnvTemplateWithStrictCostInit sync.Once + lazyCompositionEnvTemplateWithStrictCost *cel.CompositionEnv + + lazyCompositionEnvTemplateWithoutStrictCostInit sync.Once + lazyCompositionEnvTemplateWithoutStrictCost *cel.CompositionEnv +) + +func getCompositionEnvTemplateWithStrictCost() *cel.CompositionEnv { + lazyCompositionEnvTemplateWithStrictCostInit.Do(func() { + env, err := cel.NewCompositionEnv(cel.VariablesTypeName, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)) if err != nil { panic(err) } + lazyCompositionEnvTemplateWithStrictCost = env + }) + return lazyCompositionEnvTemplateWithStrictCost +} - return compositionEnvTemplateWithStrictCost - }() - compositionEnvTemplateWithoutStrictCost *cel.CompositionEnv = func() *cel.CompositionEnv { - compositionEnvTemplateWithoutStrictCost, err := cel.NewCompositionEnv(cel.VariablesTypeName, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), false)) +func getCompositionEnvTemplateWithoutStrictCost() *cel.CompositionEnv { + lazyCompositionEnvTemplateWithoutStrictCostInit.Do(func() { + env, err := cel.NewCompositionEnv(cel.VariablesTypeName, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), false)) if err != nil { panic(err) } - - return compositionEnvTemplateWithoutStrictCost - }() -) + lazyCompositionEnvTemplateWithoutStrictCost = env + }) + return lazyCompositionEnvTemplateWithoutStrictCost +} // Register registers a plugin func Register(plugins *admission.Plugins) { @@ -131,9 +143,9 @@ func compilePolicy(policy *Policy) Validator { matchConditions := policy.Spec.MatchConditions var compositionEnvTemplate *cel.CompositionEnv if strictCost { - compositionEnvTemplate = compositionEnvTemplateWithStrictCost + compositionEnvTemplate = getCompositionEnvTemplateWithStrictCost() } else { - compositionEnvTemplate = compositionEnvTemplateWithoutStrictCost + compositionEnvTemplate = getCompositionEnvTemplateWithoutStrictCost() } filterCompiler := 
cel.NewCompositedCompilerFromTemplate(compositionEnvTemplate) filterCompiler.CompileAndStoreVariables(convertv1beta1Variables(policy.Spec.Variables), optionalVars, environment.StoredExpressions) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/validator.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/validator.go index da2ea1c10f..c429ae22fc 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/validator.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/validator.go @@ -18,6 +18,7 @@ package validating import ( "context" + "errors" "fmt" "strings" @@ -132,19 +133,14 @@ func (v *validator) Validate(ctx context.Context, matchedResource schema.GroupVe } var messageResult *cel.EvaluationResult - var messageError *apiservercel.Error if len(messageResults) > i { messageResult = &messageResults[i] } - messageError, _ = err.(*apiservercel.Error) if evalResult.Error != nil { decision.Action = policyDecisionActionForError(f) decision.Evaluation = EvalError decision.Message = evalResult.Error.Error() - } else if messageError != nil && - (messageError.Type == apiservercel.ErrorTypeInternal || - (messageError.Type == apiservercel.ErrorTypeInvalid && - strings.HasPrefix(messageError.Detail, "validation failed due to running out of cost budget"))) { + } else if errors.Is(err, apiservercel.ErrInternal) || errors.Is(err, apiservercel.ErrOutOfBudget) { decision.Action = policyDecisionActionForError(f) decision.Evaluation = EvalError decision.Message = fmt.Sprintf("failed messageExpression: %s", err) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go index 437e53dc93..5bf80237f7 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go @@ -24,8 +24,8 @@ import ( "fmt" "time" - jsonpatch "github.com/evanphx/json-patch" "go.opentelemetry.io/otel/attribute" + jsonpatch "gopkg.in/evanphx/json-patch.v4" admissionv1 "k8s.io/api/admission/v1" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go index a31b875369..af70fe2446 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -165,6 +165,25 @@ type AuthenticationConfiguration struct { metav1.TypeMeta JWT []JWTAuthenticator + + // If present --anonymous-auth must not be set + Anonymous *AnonymousAuthConfig +} + +// AnonymousAuthConfig provides the configuration for the anonymous authenticator. +type AnonymousAuthConfig struct { + Enabled bool + + // If set, anonymous auth is only allowed if the request meets one of the + // conditions. + Conditions []AnonymousAuthCondition +} + +// AnonymousAuthCondition describes the condition under which anonymous auth +// should be enabled. +type AnonymousAuthCondition struct { + // Path for which anonymous auth is enabled. + Path string } // JWTAuthenticator provides the configuration for a single JWT authenticator. 
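For reference, a minimal sketch (not part of the patch) of how the AnonymousAuthConfig and AnonymousAuthCondition types introduced in the internal apiserver API above could be populated. The endpoint paths are illustrative assumptions, not taken from this diff; the reworked anonymous.NewAuthenticator later in this patch consumes the Conditions to restrict anonymous requests to the listed paths.

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/apis/apiserver"
)

func main() {
	// Anonymous authentication stays enabled, but only for a fixed set of
	// unauthenticated endpoints (the paths here are purely illustrative).
	cfg := apiserver.AuthenticationConfiguration{
		Anonymous: &apiserver.AnonymousAuthConfig{
			Enabled: true,
			Conditions: []apiserver.AnonymousAuthCondition{
				{Path: "/healthz"},
				{Path: "/readyz"},
				{Path: "/livez"},
			},
		},
	}
	fmt.Printf("anonymous enabled=%t, restricted to %d paths\n",
		cfg.Anonymous.Enabled, len(cfg.Anonymous.Conditions))
}

With an empty Conditions slice, every request would be authenticated as anonymous (the pre-existing behavior); with conditions set, requests to any other path fall through to the next authenticator or are rejected.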
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go index fc75c464a2..214ef4e4fc 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go @@ -185,6 +185,25 @@ type AuthenticationConfiguration struct { // "": "username" // } JWT []JWTAuthenticator `json:"jwt"` + + // If present --anonymous-auth must not be set + Anonymous *AnonymousAuthConfig `json:"anonymous,omitempty"` +} + +// AnonymousAuthConfig provides the configuration for the anonymous authenticator. +type AnonymousAuthConfig struct { + Enabled bool `json:"enabled"` + + // If set, anonymous auth is only allowed if the request meets one of the + // conditions. + Conditions []AnonymousAuthCondition `json:"conditions,omitempty"` +} + +// AnonymousAuthCondition describes the condition under which anonymous auth +// should be enabled. +type AnonymousAuthCondition struct { + // Path for which anonymous auth is enabled. + Path string `json:"path"` } // JWTAuthenticator provides the configuration for a single JWT authenticator. diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go index 9ee1ef8a4b..3a6c66c3aa 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go @@ -57,6 +57,26 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*AnonymousAuthCondition)(nil), (*apiserver.AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(a.(*AnonymousAuthCondition), b.(*apiserver.AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthCondition)(nil), (*AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(a.(*apiserver.AnonymousAuthCondition), b.(*AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AnonymousAuthConfig)(nil), (*apiserver.AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(a.(*AnonymousAuthConfig), b.(*apiserver.AnonymousAuthConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthConfig)(nil), (*AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(a.(*apiserver.AnonymousAuthConfig), b.(*AnonymousAuthConfig), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope) }); err != nil { @@ -324,6 +344,48 @@ func Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginC return 
autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in, out, s) } +func autoConvert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition is an autogenerated conversion function. +func Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error { + return autoConvert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in, out, s) +} + +func autoConvert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition is an autogenerated conversion function. +func Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error { + return autoConvert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in, out, s) +} + +func autoConvert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error { + out.Enabled = in.Enabled + out.Conditions = *(*[]apiserver.AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig is an autogenerated conversion function. +func Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in, out, s) +} + +func autoConvert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error { + out.Enabled = in.Enabled + out.Conditions = *(*[]AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig is an autogenerated conversion function. 
+func Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error { + return autoConvert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in, out, s) +} + func autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error { if in.JWT != nil { in, out := &in.JWT, &out.JWT @@ -336,6 +398,7 @@ func autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_Authenticatio } else { out.JWT = nil } + out.Anonymous = (*apiserver.AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous)) return nil } @@ -356,6 +419,7 @@ func autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_Authenticatio } else { out.JWT = nil } + out.Anonymous = (*AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous)) return nil } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go index e618178bfe..81b652254c 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go @@ -78,6 +78,43 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition. +func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition { + if in == nil { + return nil + } + out := new(AnonymousAuthCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]AnonymousAuthCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig. +func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig { + if in == nil { + return nil + } + out := new(AnonymousAuthConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { *out = *in @@ -89,6 +126,11 @@ func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfigura (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Anonymous != nil { + in, out := &in.Anonymous, &out.Anonymous + *out = new(AnonymousAuthConfig) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go index 00a55f7a92..570f3c4682 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go @@ -156,6 +156,25 @@ type AuthenticationConfiguration struct { // "": "username" // } JWT []JWTAuthenticator `json:"jwt"` + + // If present --anonymous-auth must not be set + Anonymous *AnonymousAuthConfig `json:"anonymous,omitempty"` +} + +// AnonymousAuthConfig provides the configuration for the anonymous authenticator. +type AnonymousAuthConfig struct { + Enabled bool `json:"enabled"` + + // If set, anonymous auth is only allowed if the request meets one of the + // conditions. + Conditions []AnonymousAuthCondition `json:"conditions,omitempty"` +} + +// AnonymousAuthCondition describes the condition under which anonymous auth +// should be enabled. +type AnonymousAuthCondition struct { + // Path for which anonymous auth is enabled. + Path string `json:"path"` } // JWTAuthenticator provides the configuration for a single JWT authenticator. diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go index 911a331f24..30ef049d40 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go @@ -37,6 +37,26 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. 
func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AnonymousAuthCondition)(nil), (*apiserver.AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(a.(*AnonymousAuthCondition), b.(*apiserver.AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthCondition)(nil), (*AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(a.(*apiserver.AnonymousAuthCondition), b.(*AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AnonymousAuthConfig)(nil), (*apiserver.AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(a.(*AnonymousAuthConfig), b.(*apiserver.AnonymousAuthConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthConfig)(nil), (*AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(a.(*apiserver.AnonymousAuthConfig), b.(*AnonymousAuthConfig), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope) }); err != nil { @@ -260,6 +280,48 @@ func RegisterConversions(s *runtime.Scheme) error { return nil } +func autoConvert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition is an autogenerated conversion function. +func Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error { + return autoConvert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in, out, s) +} + +func autoConvert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition is an autogenerated conversion function. 
+func Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error { + return autoConvert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in, out, s) +} + +func autoConvert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error { + out.Enabled = in.Enabled + out.Conditions = *(*[]apiserver.AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig is an autogenerated conversion function. +func Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error { + return autoConvert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in, out, s) +} + +func autoConvert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error { + out.Enabled = in.Enabled + out.Conditions = *(*[]AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig is an autogenerated conversion function. +func Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error { + return autoConvert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in, out, s) +} + func autoConvert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error { if in.JWT != nil { in, out := &in.JWT, &out.JWT @@ -272,6 +334,7 @@ func autoConvert_v1beta1_AuthenticationConfiguration_To_apiserver_Authentication } else { out.JWT = nil } + out.Anonymous = (*apiserver.AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous)) return nil } @@ -292,6 +355,7 @@ func autoConvert_apiserver_AuthenticationConfiguration_To_v1beta1_Authentication } else { out.JWT = nil } + out.Anonymous = (*AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous)) return nil } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go index 7da9db9272..0d78e51a96 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go @@ -25,6 +25,43 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition. +func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition { + if in == nil { + return nil + } + out := new(AnonymousAuthCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]AnonymousAuthCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig. +func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig { + if in == nil { + return nil + } + out := new(AnonymousAuthConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { *out = *in @@ -36,6 +73,11 @@ func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfigura (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Anonymous != nil { + in, out := &in.Anonymous, &out.Anonymous + *out = new(AnonymousAuthConfig) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go index 471eb4a741..5e80e2dc69 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go @@ -78,6 +78,15 @@ func ValidateAuthenticationConfiguration(c *api.AuthenticationConfiguration, dis } } + if c.Anonymous != nil { + if !utilfeature.DefaultFeatureGate.Enabled(features.AnonymousAuthConfigurableEndpoints) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("anonymous"), "anonymous is not supported when AnonymousAuthConfigurableEnpoints feature gate is disabled")) + } + if !c.Anonymous.Enabled && len(c.Anonymous.Conditions) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("anonymous", "conditions"), c.Anonymous.Conditions, "enabled should be set to true when conditions are defined")) + } + } + return allErrs } @@ -727,6 +736,7 @@ func compileMatchConditions(matchConditions []api.WebhookMatchCondition, fldPath compiler := authorizationcel.NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)) seenExpressions := sets.NewString() var compilationResults []authorizationcel.CompilationResult + var usesFieldSelector, usesLabelSelector bool for i, condition := range matchConditions { fldPath := fldPath.Child("matchConditions").Index(i).Child("expression") @@ -745,12 +755,16 @@ func compileMatchConditions(matchConditions []api.WebhookMatchCondition, fldPath continue } compilationResults = append(compilationResults, compilationResult) + usesFieldSelector = usesFieldSelector || compilationResult.UsesFieldSelector + usesLabelSelector = usesLabelSelector || compilationResult.UsesLabelSelector } if len(compilationResults) == 0 { return nil, allErrs } return &authorizationcel.CELMatcher{ CompilationResults: compilationResults, + UsesFieldSelector: usesFieldSelector, + UsesLabelSelector: usesLabelSelector, }, allErrs } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go index b88c47c672..6439e82208 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -100,6 +100,43 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration return out } +// DeepCopyInto is an autogenerated 
deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition. +func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition { + if in == nil { + return nil + } + out := new(AnonymousAuthCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]AnonymousAuthCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig. +func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig { + if in == nil { + return nil + } + out := new(AnonymousAuthConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { *out = *in @@ -111,6 +148,11 @@ func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfigura (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Anonymous != nil { + in, out := &in.Anonymous, &out.Anonymous + *out = new(AnonymousAuthConfig) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto index bda8c6953c..032c586913 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto @@ -48,11 +48,11 @@ message Event { optional string verb = 5; // Authenticated user information. - optional k8s.io.api.authentication.v1.UserInfo user = 6; + optional .k8s.io.api.authentication.v1.UserInfo user = 6; // Impersonated user information. // +optional - optional k8s.io.api.authentication.v1.UserInfo impersonatedUser = 7; + optional .k8s.io.api.authentication.v1.UserInfo impersonatedUser = 7; // Source IPs, from where the request originated and intermediate proxies. // The source IPs are listed from (in order): @@ -79,28 +79,28 @@ message Event { // For successful responses, this will only include the Code and StatusSuccess. // For non-status type error responses, this will be auto-populated with the error Message. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status responseStatus = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status responseStatus = 10; // API object from the request, in JSON format. The RequestObject is recorded as-is in the request // (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or // merging. It is an external versioned object type, and may not be a valid object on its own. // Omitted for non-resource requests. Only logged at Request Level and higher. // +optional - optional k8s.io.apimachinery.pkg.runtime.Unknown requestObject = 11; + optional .k8s.io.apimachinery.pkg.runtime.Unknown requestObject = 11; // API object returned in the response, in JSON. The ResponseObject is recorded after conversion // to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged // at Response Level. 
// +optional - optional k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 12; + optional .k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 12; // Time the request reached the apiserver. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 13; // Time the request reached current audit stage. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 14; // Annotations is an unstructured key value map stored with an audit event that may be set by // plugins invoked in the request serving chain, including authentication, authorization and @@ -115,7 +115,7 @@ message Event { // EventList is a list of audit Events. message EventList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated Event items = 2; } @@ -187,7 +187,7 @@ message ObjectReference { message Policy { // ObjectMeta is included for interoperability with API infrastructure. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules specify the audit Level a request should be recorded at. // A request may match multiple rules, in which case the FIRST matching rule is used. @@ -215,7 +215,7 @@ message Policy { // PolicyList is a list of audit Policies. message PolicyList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated Policy items = 2; } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go index 8e568c7e4d..76ef44732e 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go @@ -21,6 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/apis/apiserver" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/group" "k8s.io/apiserver/pkg/authentication/request/anonymous" @@ -39,7 +40,7 @@ import ( // DelegatingAuthenticatorConfig is the minimal configuration needed to create an authenticator // built to delegate authentication to a kube API server type DelegatingAuthenticatorConfig struct { - Anonymous bool + Anonymous *apiserver.AnonymousAuthConfig // TokenAccessReviewClient is a client to do token review. It can be nil. Then every token is ignored. 
TokenAccessReviewClient authenticationclient.AuthenticationV1Interface @@ -112,15 +113,15 @@ func (c DelegatingAuthenticatorConfig) New() (authenticator.Request, *spec.Secur } if len(authenticators) == 0 { - if c.Anonymous { - return anonymous.NewAuthenticator(), &securityDefinitions, nil + if c.Anonymous != nil && c.Anonymous.Enabled { + return anonymous.NewAuthenticator(c.Anonymous.Conditions), &securityDefinitions, nil } - return nil, nil, errors.New("No authentication method configured") + return nil, nil, errors.New("no authentication method configured") } authenticator := group.NewAuthenticatedGroupAdder(unionauth.New(authenticators...)) - if c.Anonymous { - authenticator = unionauth.NewFailOnError(authenticator, anonymous.NewAuthenticator()) + if c.Anonymous != nil && c.Anonymous.Enabled { + authenticator = unionauth.NewFailOnError(authenticator, anonymous.NewAuthenticator(c.Anonymous.Conditions)) } return authenticator, &securityDefinitions, nil } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go b/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go index f9177d1513..8a88104353 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go @@ -19,25 +19,44 @@ package anonymous import ( "net/http" + "k8s.io/apiserver/pkg/apis/apiserver" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" ) const ( - anonymousUser = user.Anonymous - + anonymousUser = user.Anonymous unauthenticatedGroup = user.AllUnauthenticated ) -func NewAuthenticator() authenticator.Request { - return authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) { - auds, _ := authenticator.AudiencesFrom(req.Context()) - return &authenticator.Response{ - User: &user.DefaultInfo{ - Name: anonymousUser, - Groups: []string{unauthenticatedGroup}, - }, - Audiences: auds, - }, true, nil - }) +type Authenticator struct { + allowedPaths map[string]bool +} + +func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + if len(a.allowedPaths) > 0 && !a.allowedPaths[req.URL.Path] { + return nil, false, nil + } + + auds, _ := authenticator.AudiencesFrom(req.Context()) + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: anonymousUser, + Groups: []string{unauthenticatedGroup}, + }, + Audiences: auds, + }, true, nil +} + +// NewAuthenticator returns a new anonymous authenticator. +// When conditions is empty all requests are authenticated as anonymous. +// When conditions are non-empty only those requests that match the at-least one +// condition are authenticated as anonymous. +func NewAuthenticator(conditions []apiserver.AnonymousAuthCondition) authenticator.Request { + allowedPaths := make(map[string]bool) + for _, c := range conditions { + allowedPaths[c.Path] = true + } + + return &Authenticator{allowedPaths: allowedPaths} } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go index d027327398..1826163939 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go @@ -17,9 +17,7 @@ limitations under the License. 
package headerrequest import ( - "crypto/x509" "fmt" - "io/ioutil" "net/http" "net/url" "strings" @@ -27,7 +25,6 @@ import ( "k8s.io/apiserver/pkg/authentication/authenticator" x509request "k8s.io/apiserver/pkg/authentication/request/x509" "k8s.io/apiserver/pkg/authentication/user" - utilcert "k8s.io/client-go/util/cert" ) // StringSliceProvider is a way to get a string slice value. It is heavily used for authentication headers among other places. @@ -106,48 +103,6 @@ func trimHeaders(headerNames ...string) ([]string, error) { return ret, nil } -func NewSecure(clientCA string, proxyClientNames []string, nameHeaders []string, groupHeaders []string, extraHeaderPrefixes []string) (authenticator.Request, error) { - if len(clientCA) == 0 { - return nil, fmt.Errorf("missing clientCA file") - } - - // Wrap with an x509 verifier - caData, err := ioutil.ReadFile(clientCA) - if err != nil { - return nil, fmt.Errorf("error reading %s: %v", clientCA, err) - } - opts := x509request.DefaultVerifyOptions() - opts.Roots = x509.NewCertPool() - certs, err := utilcert.ParseCertsPEM(caData) - if err != nil { - return nil, fmt.Errorf("error loading certs from %s: %v", clientCA, err) - } - for _, cert := range certs { - opts.Roots.AddCert(cert) - } - - trimmedNameHeaders, err := trimHeaders(nameHeaders...) - if err != nil { - return nil, err - } - trimmedGroupHeaders, err := trimHeaders(groupHeaders...) - if err != nil { - return nil, err - } - trimmedExtraHeaderPrefixes, err := trimHeaders(extraHeaderPrefixes...) - if err != nil { - return nil, err - } - - return NewDynamicVerifyOptionsSecure( - x509request.StaticVerifierFn(opts), - StaticStringSlice(proxyClientNames), - StaticStringSlice(trimmedNameHeaders), - StaticStringSlice(trimmedGroupHeaders), - StaticStringSlice(trimmedExtraHeaderPrefixes), - ), nil -} - func NewDynamicVerifyOptionsSecure(verifyOptionFn x509request.VerifyOptionFunc, proxyClientNames, nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request { headerAuthenticator := NewDynamic(nameHeaders, groupHeaders, extraHeaderPrefixes) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go index d8c4090b12..dc844ee73b 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "sync/atomic" "time" corev1 "k8s.io/api/core/v1" @@ -35,7 +36,6 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - "sync/atomic" ) const ( @@ -74,7 +74,7 @@ type RequestHeaderAuthRequestController struct { configmapInformer cache.SharedIndexInformer configmapInformerSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // exportedRequestHeaderBundle is a requestHeaderBundle that contains the last read, non-zero length content of the configmap exportedRequestHeaderBundle atomic.Value @@ -104,7 +104,10 @@ func NewRequestHeaderAuthRequestController( extraHeaderPrefixesKey: extraHeaderPrefixesKey, allowedClientNamesKey: allowedClientNamesKey, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "RequestHeaderAuthRequestController"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + 
workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "RequestHeaderAuthRequestController"}, + ), } // we construct our own informer because we need such a small subset of the information available. Just one namespace. diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go index 8261c5b583..d39deb17eb 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go @@ -20,6 +20,8 @@ import ( "context" "net/http" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apiserver/pkg/authentication/user" ) @@ -62,6 +64,16 @@ type Attributes interface { // GetPath returns the path of the request GetPath() string + + // ParseFieldSelector is lazy, thread-safe, and stores the parsed result and error. + // It returns an error if the field selector cannot be parsed. + // The returned requirements must be treated as readonly and not modified. + GetFieldSelector() (fields.Requirements, error) + + // ParseLabelSelector is lazy, thread-safe, and stores the parsed result and error. + // It returns an error if the label selector cannot be parsed. + // The returned requirements must be treated as readonly and not modified. + GetLabelSelector() (labels.Requirements, error) } // Authorizer makes an authorization decision based on information gained by making @@ -100,6 +112,11 @@ type AttributesRecord struct { Name string ResourceRequest bool Path string + + FieldSelectorRequirements fields.Requirements + FieldSelectorParsingErr error + LabelSelectorRequirements labels.Requirements + LabelSelectorParsingErr error } func (a AttributesRecord) GetUser() user.Info { @@ -146,6 +163,14 @@ func (a AttributesRecord) GetPath() string { return a.Path } +func (a AttributesRecord) GetFieldSelector() (fields.Requirements, error) { + return a.FieldSelectorRequirements, a.FieldSelectorParsingErr +} + +func (a AttributesRecord) GetLabelSelector() (labels.Requirements, error) { + return a.LabelSelectorRequirements, a.LabelSelectorParsingErr +} + type Decision int const ( diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go index 0d9293dd70..829ff91d39 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go @@ -20,22 +20,34 @@ import ( "fmt" "github.com/google/cel-go/cel" + celast "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/types/ref" authorizationv1 "k8s.io/api/authorization/v1" "k8s.io/apimachinery/pkg/util/version" apiservercel "k8s.io/apiserver/pkg/cel" "k8s.io/apiserver/pkg/cel/environment" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" ) const ( subjectAccessReviewRequestVarName = "request" + + fieldSelectorVarName = "fieldSelector" + labelSelectorVarName = "labelSelector" ) // CompilationResult represents a compiled authorization cel expression. type CompilationResult struct { Program cel.Program ExpressionAccessor ExpressionAccessor + + // These track if a given expression uses fieldSelector and labelSelector, + // so construction of data passed to the CEL expression can be optimized if those fields are unused. 
+ UsesFieldSelector bool + UsesLabelSelector bool } // EvaluationResult contains the minimal required fields and metadata of a cel evaluation @@ -96,11 +108,50 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor) (C return resultError(reason, apiservercel.ErrorTypeInvalid) } - _, err = cel.AstToCheckedExpr(ast) + checkedExpr, err := cel.AstToCheckedExpr(ast) if err != nil { // should be impossible since env.Compile returned no issues return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) } + celAST, err := celast.ToAST(checkedExpr) + if err != nil { + // should be impossible since env.Compile returned no issues + return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) + } + + var usesFieldSelector, usesLabelSelector bool + celast.PreOrderVisit(celast.NavigateAST(celAST), celast.NewExprVisitor(func(e celast.Expr) { + // we already know we use both, no need to inspect more + if usesFieldSelector && usesLabelSelector { + return + } + + var fieldName string + switch e.Kind() { + case celast.SelectKind: + // simple select (.fieldSelector / .labelSelector) + fieldName = e.AsSelect().FieldName() + case celast.CallKind: + // optional select (.?fieldSelector / .?labelSelector) + if e.AsCall().FunctionName() != operators.OptSelect { + return + } + args := e.AsCall().Args() + // args[0] is the receiver (what comes before the `.?`), args[1] is the field name being optionally selected (what comes after the `.?`) + if len(args) != 2 || args[1].Kind() != celast.LiteralKind || args[1].AsLiteral().Type() != cel.StringType { + return + } + fieldName, _ = args[1].AsLiteral().Value().(string) + } + + switch fieldName { + case fieldSelectorVarName: + usesFieldSelector = true + case labelSelectorVarName: + usesLabelSelector = true + } + })) + prog, err := env.Program(ast) if err != nil { return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal) @@ -108,6 +159,8 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor) (C return CompilationResult{ Program: prog, ExpressionAccessor: expressionAccessor, + UsesFieldSelector: usesFieldSelector, + UsesLabelSelector: usesLabelSelector, }, nil } @@ -161,7 +214,7 @@ func buildRequestType(field func(name string, declType *apiservercel.DeclType, r // buildResourceAttributesType generates a DeclType for ResourceAttributes. // if attributes are added here, also add to convertObjectToUnstructured. func buildResourceAttributesType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { - return apiservercel.NewObjectType("kubernetes.ResourceAttributes", fields( + resourceAttributesFields := []*apiservercel.DeclField{ field("namespace", apiservercel.StringType, false), field("verb", apiservercel.StringType, false), field("group", apiservercel.StringType, false), @@ -169,6 +222,33 @@ func buildResourceAttributesType(field func(name string, declType *apiservercel. 
field("resource", apiservercel.StringType, false), field("subresource", apiservercel.StringType, false), field("name", apiservercel.StringType, false), + } + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) { + resourceAttributesFields = append(resourceAttributesFields, field("fieldSelector", buildFieldSelectorType(field, fields), false)) + resourceAttributesFields = append(resourceAttributesFields, field("labelSelector", buildLabelSelectorType(field, fields), false)) + } + return apiservercel.NewObjectType("kubernetes.ResourceAttributes", fields(resourceAttributesFields...)) +} + +func buildFieldSelectorType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + return apiservercel.NewObjectType("kubernetes.FieldSelectorAttributes", fields( + field("rawSelector", apiservercel.StringType, false), + field("requirements", apiservercel.NewListType(buildSelectorRequirementType(field, fields), -1), false), + )) +} + +func buildLabelSelectorType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + return apiservercel.NewObjectType("kubernetes.LabelSelectorAttributes", fields( + field("rawSelector", apiservercel.StringType, false), + field("requirements", apiservercel.NewListType(buildSelectorRequirementType(field, fields), -1), false), + )) +} + +func buildSelectorRequirementType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + return apiservercel.NewObjectType("kubernetes.SelectorRequirement", fields( + field("key", apiservercel.StringType, false), + field("operator", apiservercel.StringType, false), + field("values", apiservercel.NewListType(apiservercel.StringType, -1), false), )) } @@ -181,7 +261,7 @@ func buildNonResourceAttributesType(field func(name string, declType *apiserverc )) } -func convertObjectToUnstructured(obj *authorizationv1.SubjectAccessReviewSpec) map[string]interface{} { +func convertObjectToUnstructured(obj *authorizationv1.SubjectAccessReviewSpec, includeFieldSelector, includeLabelSelector bool) map[string]interface{} { // Construct version containing every SubjectAccessReview user and string attribute field, even omitempty ones, for evaluation by CEL extra := obj.Extra if extra == nil { @@ -194,7 +274,7 @@ func convertObjectToUnstructured(obj *authorizationv1.SubjectAccessReviewSpec) m "extra": extra, } if obj.ResourceAttributes != nil { - ret["resourceAttributes"] = map[string]string{ + resourceAttributes := map[string]interface{}{ "namespace": obj.ResourceAttributes.Namespace, "verb": obj.ResourceAttributes.Verb, "group": obj.ResourceAttributes.Group, @@ -203,6 +283,42 @@ func convertObjectToUnstructured(obj *authorizationv1.SubjectAccessReviewSpec) m "subresource": obj.ResourceAttributes.Subresource, "name": obj.ResourceAttributes.Name, } + + if includeFieldSelector && obj.ResourceAttributes.FieldSelector != nil { + if len(obj.ResourceAttributes.FieldSelector.Requirements) > 0 { + requirements := make([]map[string]interface{}, 0, len(obj.ResourceAttributes.FieldSelector.Requirements)) + for _, r := range obj.ResourceAttributes.FieldSelector.Requirements { + requirements = 
append(requirements, map[string]interface{}{ + "key": r.Key, + "operator": r.Operator, + "values": r.Values, + }) + } + resourceAttributes[fieldSelectorVarName] = map[string]interface{}{"requirements": requirements} + } + if len(obj.ResourceAttributes.FieldSelector.RawSelector) > 0 { + resourceAttributes[fieldSelectorVarName] = map[string]interface{}{"rawSelector": obj.ResourceAttributes.FieldSelector.RawSelector} + } + } + + if includeLabelSelector && obj.ResourceAttributes.LabelSelector != nil { + if len(obj.ResourceAttributes.LabelSelector.Requirements) > 0 { + requirements := make([]map[string]interface{}, 0, len(obj.ResourceAttributes.LabelSelector.Requirements)) + for _, r := range obj.ResourceAttributes.LabelSelector.Requirements { + requirements = append(requirements, map[string]interface{}{ + "key": r.Key, + "operator": r.Operator, + "values": r.Values, + }) + } + resourceAttributes[labelSelectorVarName] = map[string]interface{}{"requirements": requirements} + } + if len(obj.ResourceAttributes.LabelSelector.RawSelector) > 0 { + resourceAttributes[labelSelectorVarName] = map[string]interface{}{"rawSelector": obj.ResourceAttributes.LabelSelector.RawSelector} + } + } + + ret["resourceAttributes"] = resourceAttributes } if obj.NonResourceAttributes != nil { ret["nonResourceAttributes"] = map[string]string{ diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go index 3202173a8d..eac97100c1 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go @@ -30,6 +30,11 @@ import ( type CELMatcher struct { CompilationResults []CompilationResult + // These track if any expressions use fieldSelector and labelSelector, + // so construction of data passed to the CEL expression can be optimized if those fields are unused. + UsesLabelSelector bool + UsesFieldSelector bool + // These are optional fields which can be populated if metrics reporting is desired Metrics MatcherMetrics AuthorizerType string @@ -53,7 +58,7 @@ func (c *CELMatcher) Eval(ctx context.Context, r *authorizationv1.SubjectAccessR }() va := map[string]interface{}{ - "request": convertObjectToUnstructured(&r.Spec), + "request": convertObjectToUnstructured(&r.Spec, c.UsesFieldSelector, c.UsesLabelSelector), } for _, compilationResult := range c.CompilationResults { evalResult, _, err := compilationResult.Program.ContextEval(ctx, va) diff --git a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go index 2cea83c2e1..563d34e13f 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go +++ b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go @@ -20,6 +20,7 @@ import ( "fmt" "strconv" "sync" + "sync/atomic" "github.com/google/cel-go/cel" "github.com/google/cel-go/checker" @@ -30,20 +31,29 @@ import ( "k8s.io/apimachinery/pkg/util/version" celconfig "k8s.io/apiserver/pkg/apis/cel" "k8s.io/apiserver/pkg/cel/library" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utilversion "k8s.io/apiserver/pkg/util/version" ) // DefaultCompatibilityVersion returns a default compatibility version for use with EnvSet // that guarantees compatibility with CEL features/libraries/parameters understood by -// an n-1 version +// the api server min compatibility version // -// This default will be set to no more than n-1 the current Kubernetes major.minor version. 
+// This default will be set to no more than the current Kubernetes major.minor version. // -// Note that a default version number less than n-1 indicates a wider range of version -// compatibility than strictly required for rollback. A wide range of compatibility is -// desirable because it means that CEL expressions are portable across a wider range -// of Kubernetes versions. +// Note that a default version number less than n-1 the current Kubernetes major.minor version +// indicates a wider range of version compatibility than strictly required for rollback. +// A wide range of compatibility is desirable because it means that CEL expressions are portable +// across a wider range of Kubernetes versions. +// A default version number equal to the current Kubernetes major.minor version +// indicates fast forward CEL features that can be used when rollback is no longer needed. func DefaultCompatibilityVersion() *version.Version { - return version.MajorMinor(1, 29) + effectiveVer := utilversion.DefaultComponentGlobalsRegistry.EffectiveVersionFor(utilversion.DefaultKubeComponent) + if effectiveVer == nil { + effectiveVer = utilversion.DefaultKubeEffectiveVersion() + } + return effectiveVer.MinCompatibilityVersion() } var baseOpts = append(baseOptsWithoutStrictCost, StrictCostOpt) @@ -132,6 +142,45 @@ var baseOptsWithoutStrictCost = []VersionedOptions{ library.CIDR(), }, }, + // Format Library + { + IntroducedVersion: version.MajorMinor(1, 31), + EnvOptions: []cel.EnvOption{ + library.Format(), + }, + }, + // Authz selectors + { + IntroducedVersion: version.MajorMinor(1, 31), + FeatureEnabled: func() bool { + enabled := utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) + authzSelectorsLibraryInit.Do(func() { + // Record the first time feature enablement was checked for this library. + // This is checked from integration tests to ensure no cached cel envs + // are constructed before feature enablement is effectively set. + authzSelectorsLibraryEnabled.Store(enabled) + // Uncomment to debug where the first initialization is coming from if needed. + // debug.PrintStack() + }) + return enabled + }, + EnvOptions: []cel.EnvOption{ + library.AuthzSelectors(), + }, + }, +} + +var ( + authzSelectorsLibraryInit sync.Once + authzSelectorsLibraryEnabled atomic.Value +) + +// AuthzSelectorsLibraryEnabled returns whether the AuthzSelectors library was enabled when it was constructed. +// If it has not been contructed yet, this returns `false, false`. +// This is solely for the benefit of the integration tests making sure feature gates get correctly parsed before AuthzSelector ever has to check for enablement. +func AuthzSelectorsLibraryEnabled() (enabled, constructed bool) { + enabled, constructed = authzSelectorsLibraryEnabled.Load().(bool) + return } var StrictCostOpt = VersionedOptions{ diff --git a/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go b/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go index b47bc8e984..07b9e8f546 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go +++ b/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go @@ -175,7 +175,15 @@ type VersionedOptions struct { // // Optional. RemovedVersion *version.Version - + // FeatureEnabled returns true if these options are enabled by feature gates, + // and returns false if these options are not enabled due to feature gates. + // + // This takes priority over IntroducedVersion / RemovedVersion for the NewExpressions environment. 
+ // + // The StoredExpressions environment ignores this function. + // + // Optional. + FeatureEnabled func() bool // EnvOptions provides CEL EnvOptions. This may be used to add a cel.Variable, a // cel.Library, or to enable other CEL EnvOptions such as language settings. // @@ -210,7 +218,7 @@ type VersionedOptions struct { // making multiple calls to Extend. func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) { if len(options) > 0 { - newExprOpts, err := e.filterAndBuildOpts(e.newExpressions, e.compatibilityVersion, options) + newExprOpts, err := e.filterAndBuildOpts(e.newExpressions, e.compatibilityVersion, true, options) if err != nil { return nil, err } @@ -218,7 +226,7 @@ func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) { if err != nil { return nil, err } - storedExprOpt, err := e.filterAndBuildOpts(e.storedExpressions, version.MajorMinor(math.MaxUint, math.MaxUint), options) + storedExprOpt, err := e.filterAndBuildOpts(e.storedExpressions, version.MajorMinor(math.MaxUint, math.MaxUint), false, options) if err != nil { return nil, err } @@ -231,13 +239,26 @@ func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) { return e, nil } -func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, opts []VersionedOptions) (cel.EnvOption, error) { +func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, honorFeatureGateEnablement bool, opts []VersionedOptions) (cel.EnvOption, error) { var envOpts []cel.EnvOption var progOpts []cel.ProgramOption var declTypes []*apiservercel.DeclType for _, opt := range opts { + var allowedByFeatureGate, allowedByVersion bool + if opt.FeatureEnabled != nil && honorFeatureGateEnablement { + // Feature-gate-enabled libraries must follow compatible default feature enablement. + // Enabling alpha features in their first release enables libraries the previous API server is unaware of. + allowedByFeatureGate = opt.FeatureEnabled() + if !allowedByFeatureGate { + continue + } + } if compatVer.AtLeast(opt.IntroducedVersion) && (opt.RemovedVersion == nil || compatVer.LessThan(opt.RemovedVersion)) { + allowedByVersion = true + } + + if allowedByFeatureGate || allowedByVersion { envOpts = append(envOpts, opt.EnvOptions...) progOpts = append(progOpts, opt.ProgramOptions...) declTypes = append(declTypes, opt.DeclTypes...) @@ -246,7 +267,10 @@ func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, o if len(declTypes) > 0 { provider := apiservercel.NewDeclTypeProvider(declTypes...) - providerOpts, err := provider.EnvOptions(base.TypeProvider()) + if compatVer.AtLeast(version.MajorMinor(1, 31)) { + provider.SetRecognizeKeywordAsFieldName(true) + } + providerOpts, err := provider.EnvOptions(base.CELTypeProvider()) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/cel/errors.go b/vendor/k8s.io/apiserver/pkg/cel/errors.go index 907ca6ec8f..d7b052fc9e 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/errors.go +++ b/vendor/k8s.io/apiserver/pkg/cel/errors.go @@ -16,11 +16,46 @@ limitations under the License. package cel +import ( + "fmt" + + "github.com/google/cel-go/cel" +) + +// ErrInternal the basic error that occurs when the expression fails to evaluate +// due to internal reasons. Any Error that has the Type of +// ErrorInternal is considered equal to ErrInternal +var ErrInternal = fmt.Errorf("internal") + +// ErrInvalid is the basic error that occurs when the expression fails to +// evaluate but not due to internal reasons. 
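The FeatureEnabled hook together with the filterAndBuildOpts change above lets a CEL library be admitted either by compatibility version or, for the new-expressions environment only, by an enabled feature gate. The sketch below models that decision with a pared-down local option type; it is illustrative only, and the gate closure and version numbers are invented.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

// option is a simplified stand-in for environment.VersionedOptions, local to this sketch.
type option struct {
	IntroducedVersion *version.Version
	RemovedVersion    *version.Version
	FeatureEnabled    func() bool
}

// include mirrors the filtering decision above: a gated option that is enabled
// bypasses the version window, a gated option that is disabled is dropped, and
// everything else is filtered purely by compatibility version.
func include(opt option, compatVer *version.Version, honorFeatureGates bool) bool {
	allowedByFeatureGate := false
	if opt.FeatureEnabled != nil && honorFeatureGates {
		allowedByFeatureGate = opt.FeatureEnabled()
		if !allowedByFeatureGate {
			return false
		}
	}
	allowedByVersion := compatVer.AtLeast(opt.IntroducedVersion) &&
		(opt.RemovedVersion == nil || compatVer.LessThan(opt.RemovedVersion))
	return allowedByFeatureGate || allowedByVersion
}

func main() {
	authzSelectors := option{
		IntroducedVersion: version.MajorMinor(1, 31),
		FeatureEnabled:    func() bool { return true }, // pretend the gate is on
	}
	// New-expressions environment at a 1.30 compatibility version: the enabled
	// gate admits the library even though 1.30 is below the introduced version.
	fmt.Println(include(authzSelectors, version.MajorMinor(1, 30), true)) // true
	// With gates not honored, only the version window matters; the real
	// stored-expressions environment passes an effectively unbounded version.
	fmt.Println(include(authzSelectors, version.MajorMinor(1, 30), false)) // false
}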
Any Error that has the Type of +// ErrorInvalid is considered equal to ErrInvalid. +var ErrInvalid = fmt.Errorf("invalid") + +// ErrRequired is the basic error that occurs when the expression is required +// but absent. +// Any Error that has the Type of ErrorRequired is considered equal +// to ErrRequired. +var ErrRequired = fmt.Errorf("required") + +// ErrCompilation is the basic error that occurs when the expression fails to +// compile. Any CompilationError wraps ErrCompilation. +// ErrCompilation wraps ErrInvalid +var ErrCompilation = fmt.Errorf("%w: compilation error", ErrInvalid) + +// ErrOutOfBudget is the basic error that occurs when the expression fails due to +// exceeding budget. +var ErrOutOfBudget = fmt.Errorf("out of budget") + // Error is an implementation of the 'error' interface, which represents a // XValidation error. type Error struct { Type ErrorType Detail string + + // Cause is an optional wrapped errors that can be useful to + // programmatically retrieve detailed errors. + Cause error } var _ error = &Error{} @@ -30,7 +65,24 @@ func (v *Error) Error() string { return v.Detail } -// ErrorType is a machine readable value providing more detail about why +func (v *Error) Is(err error) bool { + switch v.Type { + case ErrorTypeRequired: + return err == ErrRequired + case ErrorTypeInvalid: + return err == ErrInvalid + case ErrorTypeInternal: + return err == ErrInternal + } + return false +} + +// Unwrap returns the wrapped Cause. +func (v *Error) Unwrap() error { + return v.Cause +} + +// ErrorType is a machine-readable value providing more detail about why // a XValidation is invalid. type ErrorType string @@ -45,3 +97,28 @@ const ( // to user input. See InternalError(). ErrorTypeInternal ErrorType = "InternalError" ) + +// CompilationError indicates an error during expression compilation. +// It wraps ErrCompilation. +type CompilationError struct { + err *Error + Issues *cel.Issues +} + +// NewCompilationError wraps a cel.Issues to indicate a compilation failure. +func NewCompilationError(issues *cel.Issues) *CompilationError { + return &CompilationError{ + Issues: issues, + err: &Error{ + Type: ErrorTypeInvalid, + Detail: fmt.Sprintf("compilation error: %s", issues), + }} +} + +func (e *CompilationError) Error() string { + return e.err.Error() +} + +func (e *CompilationError) Unwrap() []error { + return []error{e.err, ErrCompilation} +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/format.go b/vendor/k8s.io/apiserver/pkg/cel/format.go new file mode 100644 index 0000000000..1bcfddfe76 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/format.go @@ -0,0 +1,73 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cel + +import ( + "fmt" + "reflect" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +var ( + FormatObject = decls.NewObjectType("kubernetes.NamedFormat") + FormatType = cel.ObjectType("kubernetes.NamedFormat") +) + +// Format provdes a CEL representation of kubernetes format +type Format struct { + Name string + ValidateFunc func(string) []string + + // Size of the regex string or estimated equivalent regex string used + // for cost estimation + MaxRegexSize int +} + +func (d *Format) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { + return nil, fmt.Errorf("type conversion error from 'Format' to '%v'", typeDesc) +} + +func (d *Format) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case FormatType: + return d + case types.TypeType: + return FormatType + default: + return types.NewErr("type conversion error from '%s' to '%s'", FormatType, typeVal) + } +} + +func (d *Format) Equal(other ref.Val) ref.Val { + otherDur, ok := other.(*Format) + if !ok { + return types.MaybeNoSuchOverloadErr(other) + } + return types.Bool(d.Name == otherDur.Name) +} + +func (d *Format) Type() ref.Type { + return FormatType +} + +func (d *Format) Value() interface{} { + return d +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/authz.go b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go index df4bf08071..1fd489fc91 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/authz.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go @@ -22,6 +22,11 @@ import ( "reflect" "strings" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" @@ -194,6 +199,30 @@ import ( // Examples: // // authorizer.group('').resource('pods').namespace('default').check('create').error() +// +// fieldSelector +// +// Takes a string field selector, parses it to field selector requirements, and includes it in the authorization check. +// If the field selector does not parse successfully, no field selector requirements are included in the authorization check. +// Added in Kubernetes 1.31+, Authz library version 1. +// +// .fieldSelector() +// +// Examples: +// +// authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed() +// +// labelSelector (added in v1, Kubernetes 1.31+) +// +// Takes a string label selector, parses it to label selector requirements, and includes it in the authorization check. +// If the label selector does not parse successfully, no label selector requirements are included in the authorization check. +// Added in Kubernetes 1.31+, Authz library version 1. +// +// .labelSelector() +// +// Examples: +// +// authorizer.group('').resource('pods').labelSelector('app=example').check('list').allowed() func Authz() cel.EnvOption { return cel.Lib(authzLib) } @@ -259,6 +288,66 @@ func (*authz) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } +// AuthzSelectors provides a CEL function library extension for adding fieldSelector and +// labelSelector filters to authorization checks. This requires the Authz library. +// See documentation of the Authz library for use and availability of the authorizer variable. 
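Stepping back to the pkg/cel/errors.go changes above: the sentinel errors, the custom Is method, and the multi-error Unwrap on CompilationError are designed to interoperate with the standard errors package. A small usage sketch follows (not part of the change; the malformed expression is arbitrary).

package main

import (
	"errors"
	"fmt"

	"github.com/google/cel-go/cel"
	apiservercel "k8s.io/apiserver/pkg/cel"
)

func main() {
	// An invalid-expression error matches the ErrInvalid sentinel via the custom Is method.
	var err error = &apiservercel.Error{
		Type:   apiservercel.ErrorTypeInvalid,
		Detail: "expression not allowed here",
	}
	fmt.Println(errors.Is(err, apiservercel.ErrInvalid)) // true

	// A compilation failure unwraps to both the underlying Error and ErrCompilation,
	// and ErrCompilation itself wraps ErrInvalid.
	env, _ := cel.NewEnv()
	_, iss := env.Compile("1 +") // deliberately malformed
	compErr := apiservercel.NewCompilationError(iss)
	fmt.Println(errors.Is(compErr, apiservercel.ErrCompilation)) // true
	fmt.Println(errors.Is(compErr, apiservercel.ErrInvalid))     // true
}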
+// +// fieldSelector +// +// Takes a string field selector, parses it to field selector requirements, and includes it in the authorization check. +// If the field selector does not parse successfully, no field selector requirements are included in the authorization check. +// Added in Kubernetes 1.31+. +// +// .fieldSelector() +// +// Examples: +// +// authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed() +// +// labelSelector +// +// Takes a string label selector, parses it to label selector requirements, and includes it in the authorization check. +// If the label selector does not parse successfully, no label selector requirements are included in the authorization check. +// Added in Kubernetes 1.31+. +// +// .labelSelector() +// +// Examples: +// +// authorizer.group('').resource('pods').labelSelector('app=example').check('list').allowed() +func AuthzSelectors() cel.EnvOption { + return cel.Lib(authzSelectorsLib) +} + +var authzSelectorsLib = &authzSelectors{} + +type authzSelectors struct{} + +func (*authzSelectors) LibraryName() string { + return "k8s.authzSelectors" +} + +var authzSelectorsLibraryDecls = map[string][]cel.FunctionOpt{ + "fieldSelector": { + cel.MemberOverload("authorizer_fieldselector", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType, + cel.BinaryBinding(resourceCheckFieldSelector))}, + "labelSelector": { + cel.MemberOverload("authorizer_labelselector", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType, + cel.BinaryBinding(resourceCheckLabelSelector))}, +} + +func (*authzSelectors) CompileOptions() []cel.EnvOption { + options := make([]cel.EnvOption, 0, len(authzSelectorsLibraryDecls)) + for name, overloads := range authzSelectorsLibraryDecls { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (*authzSelectors) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + func authorizerPath(arg1, arg2 ref.Val) ref.Val { authz, ok := arg1.(authorizerVal) if !ok { @@ -354,6 +443,38 @@ func resourceCheckSubresource(arg1, arg2 ref.Val) ref.Val { return result } +func resourceCheckFieldSelector(arg1, arg2 ref.Val) ref.Val { + resourceCheck, ok := arg1.(resourceCheckVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + fieldSelector, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + result := resourceCheck + result.fieldSelector = fieldSelector + return result +} + +func resourceCheckLabelSelector(arg1, arg2 ref.Val) ref.Val { + resourceCheck, ok := arg1.(resourceCheckVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + labelSelector, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + result := resourceCheck + result.labelSelector = labelSelector + return result +} + func resourceCheckNamespace(arg1, arg2 ref.Val) ref.Val { resourceCheck, ok := arg1.(resourceCheckVal) if !ok { @@ -544,11 +665,13 @@ func (g groupCheckVal) resourceCheck(resource string) resourceCheckVal { type resourceCheckVal struct { receiverOnlyObjectVal - groupCheck groupCheckVal - resource string - subresource string - namespace string - name string + groupCheck groupCheckVal + resource string + subresource string + namespace string + name string + fieldSelector string + labelSelector string } func (a resourceCheckVal) Authorize(ctx context.Context, verb string) ref.Val { @@ -563,6 +686,26 @@ func (a resourceCheckVal) Authorize(ctx context.Context, verb 
string) ref.Val { Verb: verb, User: a.groupCheck.authorizer.userInfo, } + + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) { + if len(a.fieldSelector) > 0 { + selector, err := fields.ParseSelector(a.fieldSelector) + if err != nil { + attr.FieldSelectorRequirements, attr.FieldSelectorParsingErr = nil, err + } else { + attr.FieldSelectorRequirements, attr.FieldSelectorParsingErr = selector.Requirements(), nil + } + } + if len(a.labelSelector) > 0 { + requirements, err := labels.ParseToRequirements(a.labelSelector) + if err != nil { + attr.LabelSelectorRequirements, attr.LabelSelectorParsingErr = nil, err + } else { + attr.LabelSelectorRequirements, attr.LabelSelectorParsingErr = requirements, nil + } + } + } + decision, reason, err := a.groupCheck.authorizer.authAuthorizer.Authorize(ctx, attr) return newDecision(decision, err, reason) } diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/cost.go b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go index e3bde017be..b71686309a 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/cost.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go @@ -17,16 +17,37 @@ limitations under the License. package library import ( + "fmt" "math" "github.com/google/cel-go/checker" "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + + "k8s.io/apiserver/pkg/cel" ) +// panicOnUnknown makes cost estimate functions panic on unrecognized functions. +// This is only set to true for unit tests. +var panicOnUnknown = false + +// builtInFunctions is a list of functions used in cost tests that are not handled by CostEstimator. +var knownUnhandledFunctions = map[string]bool{ + "uint": true, + "duration": true, + "bytes": true, + "timestamp": true, + "value": true, + "_==_": true, + "_&&_": true, + "_>_": true, + "!_": true, + "strings.quote": true, +} + // CostEstimator implements CEL's interpretable.ActualCostEstimator and checker.CostEstimator. 
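The Authorize change above parses the accumulated selector strings with the apimachinery fields and labels packages and attaches the resulting requirements, or the parse error, to the attributes. A standalone sketch of just that parsing step, with invented selector strings:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Field selectors parse into fields.Requirements; on failure the hunk above
	// records the parsing error instead of any requirements.
	fieldSel, err := fields.ParseSelector("spec.nodeName=node1")
	if err != nil {
		fmt.Println("field selector parsing error recorded:", err)
	} else {
		fmt.Printf("field requirements: %+v\n", fieldSel.Requirements())
	}

	// Label selectors parse into []labels.Requirement the same way.
	labelReqs, err := labels.ParseToRequirements("app=example,tier in (web,api)")
	if err != nil {
		fmt.Println("label selector parsing error recorded:", err)
	} else {
		for _, r := range labelReqs {
			fmt.Println("label requirement:", r.String())
		}
	}
}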
type CostEstimator struct { // SizeEstimator provides a CostEstimator.EstimateSize that this CostEstimator will delegate size estimation @@ -34,6 +55,25 @@ type CostEstimator struct { SizeEstimator checker.CostEstimator } +const ( + // shortest repeatable selector requirement that allocates a values slice is 2 characters: k, + selectorLengthToRequirementCount = float64(.5) + // the expensive parts to represent each requirement are a struct and a values slice + costPerRequirement = float64(common.ListCreateBaseCost + common.StructCreateBaseCost) +) + +// a selector consists of a list of requirements held in a slice +var baseSelectorCost = checker.CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost} + +func selectorCostEstimate(selectorLength checker.SizeEstimate) checker.CostEstimate { + parseCost := selectorLength.MultiplyByCostFactor(common.StringTraversalCostFactor) + + requirementCount := selectorLength.MultiplyByCostFactor(selectorLengthToRequirementCount) + requirementCost := requirementCount.MultiplyByCostFactor(costPerRequirement) + + return baseSelectorCost.Add(parseCost).Add(requirementCost) +} + func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, result ref.Val) *uint64 { switch function { case "check": @@ -45,6 +85,13 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re // All authorization builder and accessor functions have a nominal cost cost := uint64(1) return &cost + case "fieldSelector", "labelSelector": + // field and label selector parse is a string parse into a structured set of requirements + if len(args) >= 2 { + selectorLength := actualSize(args[1]) + cost := selectorCostEstimate(checker.SizeEstimate{Min: selectorLength, Max: selectorLength}) + return &cost.Max + } case "isSorted", "sum", "max", "min", "indexOf", "lastIndexOf": var cost uint64 if len(args) > 0 { @@ -105,7 +152,7 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re cost := uint64(math.Ceil(float64(actualSize(args[0])) * 2 * common.StringTraversalCostFactor)) return &cost } - case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast": + case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast", "isGlobalUnicast": // IP and CIDR accessors are nominal cost. cost := uint64(1) return &cost @@ -152,9 +199,45 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor)) return &cost } + case "validate": + if len(args) >= 2 { + format, isFormat := args[0].Value().(*cel.Format) + if isFormat { + strSize := actualSize(args[1]) + + // Dont have access to underlying regex, estimate a long regexp + regexSize := format.MaxRegexSize + + // Copied from CEL implementation for regex cost + // + // https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL + // Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0 + // in case where string is empty but regex is still expensive. 
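The cost change above prices a fieldSelector/labelSelector call from the selector's length: a list-creation base cost, a per-character parse cost, and a per-requirement allocation cost assuming at most one requirement per two characters. The sketch below reproduces that arithmetic outside the estimator; the sample selector is arbitrary, and the cel-go checker/common identifiers are the ones the hunk already uses.

package main

import (
	"fmt"

	"github.com/google/cel-go/checker"
	"github.com/google/cel-go/common"
)

// selectorCostEstimate mirrors the arithmetic above for a selector of a given length.
func selectorCostEstimate(selectorLength checker.SizeEstimate) checker.CostEstimate {
	// A selector is a list of requirements, so start with a list-creation base cost.
	base := checker.CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
	// Parsing traverses the selector string once.
	parse := selectorLength.MultiplyByCostFactor(common.StringTraversalCostFactor)
	// At most one requirement per two characters, each allocating a struct and a values slice.
	requirements := selectorLength.MultiplyByCostFactor(0.5).
		MultiplyByCostFactor(float64(common.ListCreateBaseCost + common.StructCreateBaseCost))
	return base.Add(parse).Add(requirements)
}

func main() {
	sel := "app=example,tier in (web)"
	n := uint64(len(sel))
	fmt.Printf("%+v\n", selectorCostEstimate(checker.SizeEstimate{Min: n, Max: n}))
}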
+ strCost := uint64(math.Ceil((1.0 + float64(strSize)) * common.StringTraversalCostFactor)) + // We don't know how many expressions are in the regex, just the string length (a huge + // improvement here would be to somehow get a count the number of expressions in the regex or + // how many states are in the regex state machine and use that to measure regex cost). + // For now, we're making a guess that each expression in a regex is typically at least 4 chars + // in length. + regexCost := uint64(math.Ceil(float64(regexSize) * common.RegexStringLengthCostFactor)) + cost := strCost * regexCost + return &cost + } + } + case "format.named": + // Simply dictionary lookup + cost := uint64(1) + return &cost case "sign", "asInteger", "isInteger", "asApproximateFloat", "isGreaterThan", "isLessThan", "compareTo", "add", "sub": cost := uint64(1) return &cost + case "getScheme", "getHostname", "getHost", "getPort", "getEscapedPath", "getQuery": + // url accessors + cost := uint64(1) + return &cost + } + if panicOnUnknown && !knownUnhandledFunctions[function] { + panic(fmt.Errorf("CallCost: unhandled function %q or args %v", function, args)) } return nil } @@ -170,6 +253,11 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch case "serviceAccount", "path", "group", "resource", "subresource", "namespace", "name", "allowed", "reason", "error", "errored": // All authorization builder and accessor functions have a nominal cost return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} + case "fieldSelector", "labelSelector": + // field and label selector parse is a string parse into a structured set of requirements + if len(args) == 1 { + return &checker.CallEstimate{CostEstimate: selectorCostEstimate(l.sizeEstimate(args[0]))} + } case "isSorted", "sum", "max", "min", "indexOf", "lastIndexOf": if target != nil { // Charge 1 cost for comparing each element in the list @@ -255,8 +343,10 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch // Worst case size is where is that a separator of "" is used, and each char is returned as a list element. max := sz.Max if len(args) > 1 { - if c := args[1].Expr().GetConstExpr(); c != nil { - max = uint64(c.GetInt64Value()) + if v := args[1].Expr().AsLiteral(); v != nil { + if i, ok := v.Value().(int64); ok { + max = uint64(i) + } } } // Cost is the traversal plus the construction of the result. @@ -327,7 +417,7 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch // So we double the cost of parsing the string. return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(2 * common.StringTraversalCostFactor)} } - case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast": + case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast", "isGlobalUnicast": // IP and CIDR accessors are nominal cost. 
return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} case "containsIP": @@ -373,8 +463,21 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch sz := l.sizeEstimate(args[0]) return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)} } + case "validate": + if target != nil { + sz := l.sizeEstimate(args[0]) + return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).MultiplyByCostFactor(cel.MaxNameFormatRegexSize * common.RegexStringLengthCostFactor)} + } + case "format.named": + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} case "sign", "asInteger", "isInteger", "asApproximateFloat", "isGreaterThan", "isLessThan", "compareTo", "add", "sub": return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} + case "getScheme", "getHostname", "getHost", "getPort", "getEscapedPath", "getQuery": + // url accessors + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} + } + if panicOnUnknown && !knownUnhandledFunctions[function] { + panic(fmt.Errorf("EstimateCallCost: unhandled function %q, target %v, args %v", function, target, args)) } return nil } @@ -383,6 +486,10 @@ func actualSize(value ref.Val) uint64 { if sz, ok := value.(traits.Sizer); ok { return uint64(sz.Size().(types.Int)) } + if panicOnUnknown { + // debug.PrintStack() + panic(fmt.Errorf("actualSize: non-sizer type %T", value)) + } return 1 } @@ -425,7 +532,7 @@ func (l *CostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstim type itemsNode struct { path []string t *types.Type - expr *exprpb.Expr + expr ast.Expr } func (i *itemsNode) Path() []string { @@ -436,7 +543,7 @@ func (i *itemsNode) Type() *types.Type { return i.t } -func (i *itemsNode) Expr() *exprpb.Expr { +func (i *itemsNode) Expr() ast.Expr { return i.expr } @@ -444,6 +551,8 @@ func (i *itemsNode) ComputedSize() *checker.SizeEstimate { return nil } +var _ checker.AstNode = (*itemsNode)(nil) + // traversalCost computes the cost of traversing a ref.Val as a data tree. func traversalCost(v ref.Val) uint64 { // TODO: This could potentially be optimized by sampling maps and lists instead of traversing. diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/format.go b/vendor/k8s.io/apiserver/pkg/cel/library/format.go new file mode 100644 index 0000000000..c051f33c00 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/library/format.go @@ -0,0 +1,270 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package library + +import ( + "fmt" + "net/url" + + "github.com/asaskevich/govalidator" + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/decls" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/util/validation" + apiservercel "k8s.io/apiserver/pkg/cel" + "k8s.io/kube-openapi/pkg/validation/strfmt" +) + +// Format provides a CEL library exposing common named Kubernetes string +// validations. Can be used in CRD ValidationRules messageExpression. +// +// Example: +// +// rule: format.dns1123label.validate(object.metadata.name).hasValue() +// messageExpression: format.dns1123label.validate(object.metadata.name).value().join("\n") +// +// format.named(name: string) -> ?Format +// +// Returns the Format with the given name, if it exists. Otherwise, optional.none +// Allowed names are: +// - `dns1123Label` +// - `dns1123Subdomain` +// - `dns1035Label` +// - `qualifiedName` +// - `dns1123LabelPrefix` +// - `dns1123SubdomainPrefix` +// - `dns1035LabelPrefix` +// - `labelValue` +// - `uri` +// - `uuid` +// - `byte` +// - `date` +// - `datetime` +// +// format.() -> Format +// +// Convenience functions for all the named formats are also available +// +// Examples: +// format.dns1123Label().validate("my-label-name") +// format.dns1123Subdomain().validate("apiextensions.k8s.io") +// format.dns1035Label().validate("my-label-name") +// format.qualifiedName().validate("apiextensions.k8s.io/v1beta1") +// format.dns1123LabelPrefix().validate("my-label-prefix-") +// format.dns1123SubdomainPrefix().validate("mysubdomain.prefix.-") +// format.dns1035LabelPrefix().validate("my-label-prefix-") +// format.uri().validate("http://example.com") +// Uses same pattern as isURL, but returns an error +// format.uuid().validate("123e4567-e89b-12d3-a456-426614174000") +// format.byte().validate("aGVsbG8=") +// format.date().validate("2021-01-01") +// format.datetime().validate("2021-01-01T00:00:00Z") +// + +// .validate(str: string) -> ?list +// +// Validates the given string against the given format. Returns optional.none +// if the string is valid, otherwise a list of validation error strings. +func Format() cel.EnvOption { + return cel.Lib(formatLib) +} + +var formatLib = &format{} + +type format struct{} + +func (*format) LibraryName() string { + return "format" +} + +func ZeroArgumentFunctionBinding(binding func() ref.Val) decls.OverloadOpt { + return func(o *decls.OverloadDecl) (*decls.OverloadDecl, error) { + wrapped, err := decls.FunctionBinding(func(values ...ref.Val) ref.Val { return binding() })(o) + if err != nil { + return nil, err + } + if len(wrapped.ArgTypes()) != 0 { + return nil, fmt.Errorf("function binding must have 0 arguments") + } + return o, nil + } +} + +func (*format) CompileOptions() []cel.EnvOption { + options := make([]cel.EnvOption, 0, len(formatLibraryDecls)) + for name, overloads := range formatLibraryDecls { + options = append(options, cel.Function(name, overloads...)) + } + for name, constantValue := range ConstantFormats { + prefixedName := "format." 
+ name + options = append(options, cel.Function(prefixedName, cel.Overload(prefixedName, []*cel.Type{}, apiservercel.FormatType, ZeroArgumentFunctionBinding(func() ref.Val { + return constantValue + })))) + } + return options +} + +func (*format) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +var ConstantFormats map[string]*apiservercel.Format = map[string]*apiservercel.Format{ + "dns1123Label": { + Name: "DNS1123Label", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSLabel(s, false) }, + MaxRegexSize: 30, + }, + "dns1123Subdomain": { + Name: "DNS1123Subdomain", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSSubdomain(s, false) }, + MaxRegexSize: 60, + }, + "dns1035Label": { + Name: "DNS1035Label", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNS1035Label(s, false) }, + MaxRegexSize: 30, + }, + "qualifiedName": { + Name: "QualifiedName", + ValidateFunc: validation.IsQualifiedName, + MaxRegexSize: 60, // uses subdomain regex + }, + + "dns1123LabelPrefix": { + Name: "DNS1123LabelPrefix", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSLabel(s, true) }, + MaxRegexSize: 30, + }, + "dns1123SubdomainPrefix": { + Name: "DNS1123SubdomainPrefix", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSSubdomain(s, true) }, + MaxRegexSize: 60, + }, + "dns1035LabelPrefix": { + Name: "DNS1035LabelPrefix", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNS1035Label(s, true) }, + MaxRegexSize: 30, + }, + "labelValue": { + Name: "LabelValue", + ValidateFunc: validation.IsValidLabelValue, + MaxRegexSize: 40, + }, + + // CRD formats + // Implementations sourced from strfmt, which kube-openapi uses as its + // format library. There are other CRD formats supported, but they are + // covered by other portions of the CEL library (like IP/CIDR), or their + // use is discouraged (like bsonobjectid, email, etc) + "uri": { + Name: "URI", + ValidateFunc: func(s string) []string { + // Directly call ParseRequestURI since we can get a better error message + _, err := url.ParseRequestURI(s) + if err != nil { + return []string{err.Error()} + } + return nil + }, + // Use govalidator url regex to estimate, since ParseRequestURI + // doesnt use regex + MaxRegexSize: len(govalidator.URL), + }, + "uuid": { + Name: "uuid", + ValidateFunc: func(s string) []string { + if !strfmt.Default.Validates("uuid", s) { + return []string{"does not match the UUID format"} + } + return nil + }, + MaxRegexSize: len(strfmt.UUIDPattern), + }, + "byte": { + Name: "byte", + ValidateFunc: func(s string) []string { + if !strfmt.Default.Validates("byte", s) { + return []string{"invalid base64"} + } + return nil + }, + MaxRegexSize: len(govalidator.Base64), + }, + "date": { + Name: "date", + ValidateFunc: func(s string) []string { + if !strfmt.Default.Validates("date", s) { + return []string{"invalid date"} + } + return nil + }, + // Estimated regex size for RFC3339FullDate which is + // a date format. 
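The ConstantFormats table above wraps validators that already exist in apimachinery; the ValidateFunc contract is simply that an empty result slice means the value is valid and a non-empty slice carries human-readable errors. A small sketch exercising a few of those underlying validators directly (the sample values are invented):

package main

import (
	"fmt"

	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	checks := map[string][]string{
		"dns1123Label ok":  apimachineryvalidation.NameIsDNSLabel("my-label-name", false),
		"dns1123Label bad": apimachineryvalidation.NameIsDNSLabel("Not_A_Label", false),
		"qualifiedName ok": validation.IsQualifiedName("apps/deployment-replicas"),
		"labelValue bad":   validation.IsValidLabelValue("has spaces"),
	}
	for name, errs := range checks {
		if len(errs) == 0 {
			fmt.Println(name, "=> valid")
		} else {
			fmt.Println(name, "=>", errs)
		}
	}
}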
Assume a date-time pattern is longer + // so use that to conservatively estimate this + MaxRegexSize: len(strfmt.DateTimePattern), + }, + "datetime": { + Name: "datetime", + ValidateFunc: func(s string) []string { + if !strfmt.Default.Validates("datetime", s) { + return []string{"invalid datetime"} + } + return nil + }, + MaxRegexSize: len(strfmt.DateTimePattern), + }, +} + +var formatLibraryDecls = map[string][]cel.FunctionOpt{ + "validate": { + cel.MemberOverload("format-validate", []*cel.Type{apiservercel.FormatType, cel.StringType}, cel.OptionalType(cel.ListType(cel.StringType)), cel.BinaryBinding(formatValidate)), + }, + "format.named": { + cel.Overload("format-named", []*cel.Type{cel.StringType}, cel.OptionalType(apiservercel.FormatType), cel.UnaryBinding(func(name ref.Val) ref.Val { + nameString, ok := name.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(name) + } + + f, ok := ConstantFormats[nameString] + if !ok { + return types.OptionalNone + } + return types.OptionalOf(f) + })), + }, +} + +func formatValidate(arg1, arg2 ref.Val) ref.Val { + f, ok := arg1.Value().(*apiservercel.Format) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + str, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg2) + } + + res := f.ValidateFunc(str) + if len(res) == 0 { + return types.OptionalNone + } + return types.OptionalOf(types.NewStringList(types.DefaultTypeAdapter, res)) +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/limits.go b/vendor/k8s.io/apiserver/pkg/cel/limits.go index 65c6ad5f3a..66ab4e44c9 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/limits.go +++ b/vendor/k8s.io/apiserver/pkg/cel/limits.go @@ -47,4 +47,6 @@ const ( MinBoolSize = 4 // MinNumberSize is the length of literal 0 MinNumberSize = 1 + + MaxNameFormatRegexSize = 128 ) diff --git a/vendor/k8s.io/apiserver/pkg/cel/quantity.go b/vendor/k8s.io/apiserver/pkg/cel/quantity.go index 1057e33fe8..ce82396436 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/quantity.go +++ b/vendor/k8s.io/apiserver/pkg/cel/quantity.go @@ -50,7 +50,7 @@ func (d Quantity) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { func (d Quantity) ConvertToType(typeVal ref.Type) ref.Val { switch typeVal { - case typeValue: + case quantityTypeValue: return d case types.TypeType: return quantityTypeValue diff --git a/vendor/k8s.io/apiserver/pkg/cel/types.go b/vendor/k8s.io/apiserver/pkg/cel/types.go index bd14e16974..83c90c8916 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/types.go +++ b/vendor/k8s.io/apiserver/pkg/cel/types.go @@ -27,7 +27,7 @@ import ( "github.com/google/cel-go/common/types/traits" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - "google.golang.org/protobuf/proto" + "k8s.io/apimachinery/pkg/api/resource" ) const ( @@ -348,9 +348,14 @@ func NewDeclTypeProvider(rootTypes ...*DeclType) *DeclTypeProvider { // DeclTypeProvider extends the CEL ref.TypeProvider interface and provides an Open API Schema-based // type-system. 
type DeclTypeProvider struct { - registeredTypes map[string]*DeclType - typeProvider ref.TypeProvider - typeAdapter ref.TypeAdapter + registeredTypes map[string]*DeclType + typeProvider types.Provider + typeAdapter types.Adapter + recognizeKeywordAsFieldName bool +} + +func (rt *DeclTypeProvider) SetRecognizeKeywordAsFieldName(recognize bool) { + rt.recognizeKeywordAsFieldName = recognize } func (rt *DeclTypeProvider) EnumValue(enumName string) ref.Val { @@ -365,7 +370,7 @@ func (rt *DeclTypeProvider) FindIdent(identName string) (ref.Val, bool) { // as well as a custom ref.TypeProvider. // // If the DeclTypeProvider value is nil, an empty []cel.EnvOption set is returned. -func (rt *DeclTypeProvider) EnvOptions(tp ref.TypeProvider) ([]cel.EnvOption, error) { +func (rt *DeclTypeProvider) EnvOptions(tp types.Provider) ([]cel.EnvOption, error) { if rt == nil { return []cel.EnvOption{}, nil } @@ -381,54 +386,52 @@ func (rt *DeclTypeProvider) EnvOptions(tp ref.TypeProvider) ([]cel.EnvOption, er // WithTypeProvider returns a new DeclTypeProvider that sets the given TypeProvider // If the original DeclTypeProvider is nil, the returned DeclTypeProvider is still nil. -func (rt *DeclTypeProvider) WithTypeProvider(tp ref.TypeProvider) (*DeclTypeProvider, error) { +func (rt *DeclTypeProvider) WithTypeProvider(tp types.Provider) (*DeclTypeProvider, error) { if rt == nil { return nil, nil } - var ta ref.TypeAdapter = types.DefaultTypeAdapter - tpa, ok := tp.(ref.TypeAdapter) + var ta types.Adapter = types.DefaultTypeAdapter + tpa, ok := tp.(types.Adapter) if ok { ta = tpa } rtWithTypes := &DeclTypeProvider{ - typeProvider: tp, - typeAdapter: ta, - registeredTypes: rt.registeredTypes, + typeProvider: tp, + typeAdapter: ta, + registeredTypes: rt.registeredTypes, + recognizeKeywordAsFieldName: rt.recognizeKeywordAsFieldName, } for name, declType := range rt.registeredTypes { - tpType, found := tp.FindType(name) - expT, err := declType.ExprType() - if err != nil { - return nil, fmt.Errorf("fail to get cel type: %s", err) - } - if found && !proto.Equal(tpType, expT) { + tpType, found := tp.FindStructType(name) + // cast celType to types.type + + expT := declType.CelType() + if found && !expT.IsExactType(tpType) { return nil, fmt.Errorf( "type %s definition differs between CEL environment and type provider", name) } + } return rtWithTypes, nil } -// FindType attempts to resolve the typeName provided from the rule's rule-schema, or if not +// FindStructType attempts to resolve the typeName provided from the rule's rule-schema, or if not // from the embedded ref.TypeProvider. // -// FindType overrides the default type-finding behavior of the embedded TypeProvider. +// FindStructType overrides the default type-finding behavior of the embedded TypeProvider. // // Note, when the type name is based on the Open API Schema, the name will reflect the object path // where the type definition appears. -func (rt *DeclTypeProvider) FindType(typeName string) (*exprpb.Type, bool) { +func (rt *DeclTypeProvider) FindStructType(typeName string) (*types.Type, bool) { if rt == nil { return nil, false } declType, found := rt.findDeclType(typeName) if found { - expT, err := declType.ExprType() - if err != nil { - return expT, false - } + expT := declType.CelType() return expT, found } - return rt.typeProvider.FindType(typeName) + return rt.typeProvider.FindStructType(typeName) } // FindDeclType returns the CPT type description which can be mapped to a CEL type. 
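The FindStructFieldType change just below falls back to the "__name__" escaped form when a requested field name collides with a CEL reserved word (for example, namespace). A self-contained sketch of that lookup rule follows; the field map, type strings, and reserved-word subset are invented stand-ins, not the real provider types.

package main

import "fmt"

// celReservedSymbols is a stand-in for the reserved-word set consulted by the
// provider; "namespace" really is a CEL reserved word, which is why escaped
// schemas store it as "__namespace__".
var celReservedSymbols = map[string]bool{
	"namespace": true, "if": true, "as": true, "in": true, "loop": true,
}

// lookupField mirrors the fallback: try the literal field name first, then the
// escaped "__name__" form when the name is a reserved word.
func lookupField(fields map[string]string, name string, recognizeKeyword bool) (string, bool) {
	if t, ok := fields[name]; ok {
		return t, true
	}
	if recognizeKeyword && celReservedSymbols[name] {
		t, ok := fields["__"+name+"__"]
		return t, ok
	}
	return "", false
}

func main() {
	fields := map[string]string{"__namespace__": "string", "verb": "string"}
	fmt.Println(lookupField(fields, "namespace", true))  // string true
	fmt.Println(lookupField(fields, "namespace", false)) // "" false
}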
@@ -439,37 +442,41 @@ func (rt *DeclTypeProvider) FindDeclType(typeName string) (*DeclType, bool) { return rt.findDeclType(typeName) } -// FindFieldType returns a field type given a type name and field name, if found. +// FindStructFieldNames returns the field names associated with the type, if the type +// is found. +func (rt *DeclTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) { + return []string{}, false +} + +// FindStructFieldType returns a field type given a type name and field name, if found. // // Note, the type name for an Open API Schema type is likely to be its qualified object path. // If, in the future an object instance rather than a type name were provided, the field // resolution might more accurately reflect the expected type model. However, in this case // concessions were made to align with the existing CEL interfaces. -func (rt *DeclTypeProvider) FindFieldType(typeName, fieldName string) (*ref.FieldType, bool) { +func (rt *DeclTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) { st, found := rt.findDeclType(typeName) if !found { - return rt.typeProvider.FindFieldType(typeName, fieldName) + return rt.typeProvider.FindStructFieldType(typeName, fieldName) } f, found := st.Fields[fieldName] + if rt.recognizeKeywordAsFieldName && !found && celReservedSymbols.Has(fieldName) { + f, found = st.Fields["__"+fieldName+"__"] + } + if found { ft := f.Type - expT, err := ft.ExprType() - if err != nil { - return nil, false - } - return &ref.FieldType{ + expT := ft.CelType() + return &types.FieldType{ Type: expT, }, true } // This could be a dynamic map. if st.IsMap() { et := st.ElemType - expT, err := et.ExprType() - if err != nil { - return nil, false - } - return &ref.FieldType{ + expT := et.CelType() + return &types.FieldType{ Type: expT, }, true } @@ -576,6 +583,10 @@ var ( // labeled as Timestamp will necessarily have the same MinSerializedSize. TimestampType = NewSimpleTypeWithMinSize("timestamp", cel.TimestampType, types.Timestamp{Time: time.Time{}}, JSONDateSize) + // QuantityDeclType wraps a [QuantityType] and makes it usable with functions that expect + // a [DeclType]. + QuantityDeclType = NewSimpleTypeWithMinSize("quantity", QuantityType, Quantity{Quantity: resource.NewQuantity(0, resource.DecimalSI)}, 8) + // UintType is equivalent to the CEL 'uint' type. UintType = NewSimpleTypeWithMinSize("uint", cel.UintType, types.Uint(0), 1) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go index e102a1e328..eec02e5722 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go @@ -22,6 +22,11 @@ import ( "net/http" "time" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/runtime" @@ -117,5 +122,31 @@ func GetAuthorizerAttributes(ctx context.Context) (authorizer.Attributes, error) attribs.Namespace = requestInfo.Namespace attribs.Name = requestInfo.Name + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) { + // parsing here makes it easy to keep the AttributesRecord type value-only and avoids any mutex copies when + // doing shallow copies in other steps. 
+ if len(requestInfo.FieldSelector) > 0 { + fieldSelector, err := fields.ParseSelector(requestInfo.FieldSelector) + if err != nil { + attribs.FieldSelectorRequirements, attribs.FieldSelectorParsingErr = nil, err + } else { + if requirements := fieldSelector.Requirements(); len(requirements) > 0 { + attribs.FieldSelectorRequirements, attribs.FieldSelectorParsingErr = fieldSelector.Requirements(), nil + } + } + } + + if len(requestInfo.LabelSelector) > 0 { + labelSelector, err := labels.Parse(requestInfo.LabelSelector) + if err != nil { + attribs.LabelSelectorRequirements, attribs.LabelSelectorParsingErr = nil, err + } else { + if requirements, _ /*selectable*/ := labelSelector.Requirements(); len(requirements) > 0 { + attribs.LabelSelectorRequirements, attribs.LabelSelectorParsingErr = requirements, nil + } + } + } + } + return &attribs, nil } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go index 1ecf59d454..6e36ffec86 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go @@ -22,6 +22,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" + "k8s.io/apiserver/pkg/endpoints/request" tracing "k8s.io/component-base/tracing" ) @@ -32,6 +33,14 @@ func WithTracing(handler http.Handler, tp trace.TracerProvider) http.Handler { otelhttp.WithPropagators(tracing.Propagators()), otelhttp.WithPublicEndpoint(), otelhttp.WithTracerProvider(tp), + otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string { + ctx := r.Context() + info, exist := request.RequestInfoFrom(ctx) + if !exist || !info.IsResourceRequest { + return r.Method + } + return getSpanNameFromRequestInfo(info, r) + }), } wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Add the http.target attribute to the otelhttp span @@ -45,3 +54,22 @@ func WithTracing(handler http.Handler, tp trace.TracerProvider) http.Handler { // See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough return otelhttp.NewHandler(wrappedHandler, "KubernetesAPI", opts...) 
} + +func getSpanNameFromRequestInfo(info *request.RequestInfo, r *http.Request) string { + spanName := "/" + info.APIPrefix + if info.APIGroup != "" { + spanName += "/" + info.APIGroup + } + spanName += "/" + info.APIVersion + if info.Namespace != "" { + spanName += "/namespaces/{:namespace}" + } + spanName += "/" + info.Resource + if info.Name != "" { + spanName += "/" + "{:name}" + } + if info.Subresource != "" { + spanName += "/" + info.Subresource + } + return r.Method + " " + spanName +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/equality.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/equality.go index a1f27f1d10..6065607d5a 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/equality.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/equality.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/endpoints/metrics" @@ -51,7 +52,7 @@ func getAvoidTimestampEqualities() conversion.Equalities { } var eqs = equality.Semantic.Copy() - err := eqs.AddFunc( + err := eqs.AddFuncs( func(a, b metav1.ManagedFieldsEntry) bool { // Two objects' managed fields are equivalent if, ignoring timestamp, // the objects are deeply equal. @@ -59,6 +60,14 @@ func getAvoidTimestampEqualities() conversion.Equalities { b.Time = nil return reflect.DeepEqual(a, b) }, + func(a, b unstructured.Unstructured) bool { + // Check if the managed fields are equal by converting to structured types and leveraging the above + // function, then, ignoring the managed fields, equality check the rest of the unstructured data. + if !avoidTimestampEqualities.DeepEqual(a.GetManagedFields(), b.GetManagedFields()) { + return false + } + return equalIgnoringValueAtPath(a.Object, b.Object, []string{"metadata", "managedFields"}) + }, ) if err != nil { @@ -70,6 +79,36 @@ func getAvoidTimestampEqualities() conversion.Equalities { return avoidTimestampEqualities } +func equalIgnoringValueAtPath(a, b any, path []string) bool { + if len(path) == 0 { // found the value to ignore + return true + } + aMap, aOk := a.(map[string]any) + bMap, bOk := b.(map[string]any) + if !aOk || !bOk { + // Can't traverse into non-maps, ignore + return true + } + if len(aMap) != len(bMap) { + return false + } + pathHead := path[0] + for k, aVal := range aMap { + bVal, ok := bMap[k] + if !ok { + return false + } + if k == pathHead { + if !equalIgnoringValueAtPath(aVal, bVal, path[1:]) { + return false + } + } else if !avoidTimestampEqualities.DeepEqual(aVal, bVal) { + return false + } + } + return true +} + // IgnoreManagedFieldsTimestampsTransformer reverts timestamp updates // if the non-managed parts of the object are equivalent func IgnoreManagedFieldsTimestampsTransformer( @@ -152,14 +191,20 @@ func IgnoreManagedFieldsTimestampsTransformer( return newObj, nil } + eqFn := equalities.DeepEqual + if _, ok := newObj.(*unstructured.Unstructured); ok { + // Use strict equality with unstructured + eqFn = equalities.DeepEqualWithNilDifferentFromEmpty + } + // This condition ensures the managed fields are always compared first. If // this check fails, the if statement will short circuit. If the check // succeeds the slow path is taken which compares entire objects. 
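The equalIgnoringValueAtPath helper above compares two unstructured objects while skipping exactly one path, so that a difference confined to metadata.managedFields does not count as a change. Below is a simplified standalone version that uses reflect.DeepEqual in place of the semantic Equalities object (so nested timestamp-ignoring is not modeled); the sample objects are invented.

package main

import (
	"fmt"
	"reflect"
)

// equalIgnoringValueAtPath compares two nested maps, ignoring whatever sits at
// the given path, and treating non-map values at intermediate path steps as equal.
func equalIgnoringValueAtPath(a, b any, path []string) bool {
	if len(path) == 0 {
		return true // reached the value we were told to ignore
	}
	aMap, aOk := a.(map[string]any)
	bMap, bOk := b.(map[string]any)
	if !aOk || !bOk {
		return true // cannot traverse into non-maps, ignore
	}
	if len(aMap) != len(bMap) {
		return false
	}
	for k, aVal := range aMap {
		bVal, ok := bMap[k]
		if !ok {
			return false
		}
		if k == path[0] {
			if !equalIgnoringValueAtPath(aVal, bVal, path[1:]) {
				return false
			}
		} else if !reflect.DeepEqual(aVal, bVal) {
			return false
		}
	}
	return true
}

func main() {
	oldObj := map[string]any{
		"metadata": map[string]any{"name": "demo", "managedFields": "old-entries"},
		"spec":     map[string]any{"replicas": 1},
	}
	newObj := map[string]any{
		"metadata": map[string]any{"name": "demo", "managedFields": "new-entries"},
		"spec":     map[string]any{"replicas": 1},
	}
	// Only metadata.managedFields differs, so the objects compare as equal.
	fmt.Println(equalIgnoringValueAtPath(oldObj, newObj, []string{"metadata", "managedFields"})) // true
}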
- if !equalities.DeepEqualWithNilDifferentFromEmpty(oldManagedFields, newManagedFields) { + if !eqFn(oldManagedFields, newManagedFields) { return newObj, nil } - if equalities.DeepEqualWithNilDifferentFromEmpty(newObj, oldObj) { + if eqFn(newObj, oldObj) { // Remove any changed timestamps, so that timestamp is not the only // change seen by etcd. // diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go index d928deed09..3cc4a728a5 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -41,7 +41,7 @@ import ( "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/registry/rest" - genericfilters "k8s.io/apiserver/pkg/server/filters" + "k8s.io/apiserver/pkg/server/routine" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/tracing" "k8s.io/klog/v2" @@ -285,7 +285,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc } // Run watch serving in a separate goroutine to allow freeing current stack memory - t := genericfilters.TaskFrom(req.Context()) + t := routine.TaskFrom(req.Context()) if t != nil { t.Func = serve } else { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index 8209efef70..14278aed74 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -23,8 +23,8 @@ import ( "strings" "time" - jsonpatch "github.com/evanphx/json-patch" "go.opentelemetry.io/otel/attribute" + jsonpatch "gopkg.in/evanphx/json-patch.v4" kjson "sigs.k8s.io/json" "k8s.io/apimachinery/pkg/api/errors" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go index 2558494bd9..808943d161 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -27,6 +27,8 @@ import ( metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" ) @@ -62,6 +64,13 @@ type RequestInfo struct { Name string // Parts are the path parts for the request, always starting with /{resource}/{name} Parts []string + + // FieldSelector contains the unparsed field selector from a request. It is only present if the apiserver + // honors field selectors for the verb this request is associated with. + FieldSelector string + // LabelSelector contains the unparsed field selector from a request. It is only present if the apiserver + // honors field selectors for the verb this request is associated with. 
+ LabelSelector string } // specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal @@ -77,6 +86,9 @@ var specialVerbsNoSubresources = sets.NewString("proxy") // this list allows the parser to distinguish between a namespace subresource, and a namespaced resource var namespaceSubresources = sets.NewString("status", "finalize") +// verbsWithSelectors is the list of verbs which support fieldSelector and labelSelector parameters +var verbsWithSelectors = sets.NewString("list", "watch", "deletecollection") + // NamespaceSubResourcesForTest exports namespaceSubresources for testing in pkg/controlplane/master_test.go, so we never drift var NamespaceSubResourcesForTest = sets.NewString(namespaceSubresources.List()...) @@ -151,6 +163,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er currentParts = currentParts[1:] // handle input of form /{specialVerb}/* + verbViaPathPrefix := false if specialVerbs.Has(currentParts[0]) { if len(currentParts) < 2 { return &requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL) @@ -158,6 +171,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er requestInfo.Verb = currentParts[0] currentParts = currentParts[1:] + verbViaPathPrefix = true } else { switch req.Method { @@ -238,11 +252,28 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er } } } + // if there's no name on the request and we thought it was a delete before, then the actual verb is deletecollection if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" { requestInfo.Verb = "deletecollection" } + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) { + // Don't support selector authorization on requests that used the deprecated verb-via-path mechanism, since they don't support selectors consistently. + // There are multi-object and single-object watch endpoints, and only the multi-object one supports selectors. + if !verbViaPathPrefix && verbsWithSelectors.Has(requestInfo.Verb) { + // interestingly these are parsed above, but the current structure there means that if one (or anything) in the + // listOptions fails to decode, the field and label selectors are lost. + // therefore, do the straight query param read here. 
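// Illustrative example, not part of the vendored change: with the AuthorizeWithSelectors
// feature gate enabled, the straight query-param read below surfaces the raw selectors on
// RequestInfo for list/watch/deletecollection requests. The request literal is hypothetical.
//
//	u, _ := url.Parse("/api/v1/namespaces/ns1/pods?labelSelector=app%3Dweb&fieldSelector=spec.nodeName%3Dnode1")
//	req := &http.Request{Method: "GET", URL: u}
//	factory := &RequestInfoFactory{APIPrefixes: sets.NewString("api", "apis"), GrouplessAPIPrefixes: sets.NewString("api")}
//	info, _ := factory.NewRequestInfo(req)
//	// info.Verb == "list", info.LabelSelector == "app=web", info.FieldSelector == "spec.nodeName=node1"
//	// A verb-via-path request such as /api/v1/watch/... leaves both selector fields empty.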
+ if vals := req.URL.Query()["fieldSelector"]; len(vals) > 0 { + requestInfo.FieldSelector = vals[0] + } + if vals := req.URL.Query()["labelSelector"]; len(vals) > 0 { + requestInfo.LabelSelector = vals[0] + } + } + } + return &requestInfo, nil } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go index b7b7f897c6..435af8e7cf 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go @@ -274,6 +274,7 @@ func AuditAnnotationsFromLatencyTrackers(ctx context.Context) map[string]string mutatingWebhookLatencyKey = "apiserver.latency.k8s.io/mutating-webhook" validatingWebhookLatencyKey = "apiserver.latency.k8s.io/validating-webhook" decodeLatencyKey = "apiserver.latency.k8s.io/decode-response-object" + apfQueueWaitLatencyKey = "apiserver.latency.k8s.io/apf-queue-wait" ) tracker, ok := LatencyTrackersFrom(ctx) @@ -303,6 +304,8 @@ func AuditAnnotationsFromLatencyTrackers(ctx context.Context) map[string]string if latency := tracker.DecodeTracker.GetLatency(); latency != 0 { annotations[decodeLatencyKey] = latency.String() } - + if latency := tracker.APFQueueWaitTracker.GetLatency(); latency != 0 { + annotations[apfQueueWaitLatencyKey] = latency.String() + } return annotations } diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index bae04d9545..27d9761b82 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -18,7 +18,6 @@ package features import ( "k8s.io/apimachinery/pkg/util/runtime" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/featuregate" ) @@ -53,6 +52,13 @@ const ( // caching with ETags containing all APIResources known to the apiserver. AggregatedDiscoveryEndpoint featuregate.Feature = "AggregatedDiscoveryEndpoint" + // owner: @vinayakankugoyal + // kep: https://kep.k8s.io/4633 + // alpha: v1.31 + // + // Allows us to enable anonymous auth for only certain apiserver endpoints. + AnonymousAuthConfigurableEndpoints featuregate.Feature = "AnonymousAuthConfigurableEndpoints" + // owner: @smarterclayton // alpha: v1.8 // beta: v1.9 @@ -62,16 +68,6 @@ const ( // all at once. APIListChunking featuregate.Feature = "APIListChunking" - // owner: @MikeSpreitzer @yue9944882 - // alpha: v1.18 - // beta: v1.20 - // stable: 1.29 - // - // Enables managing request concurrency with prioritization and fairness at each server. - // The FeatureGate was introduced in release 1.15 but the feature - // was not really implemented before 1.18. - APIPriorityAndFairness featuregate.Feature = "APIPriorityAndFairness" - // owner: @ilackams // alpha: v1.7 // beta: v1.16 @@ -98,6 +94,18 @@ const ( // Enables serving watch requests in separate goroutines. APIServingWithRoutine featuregate.Feature = "APIServingWithRoutine" + // owner: @deads2k + // kep: https://kep.k8s.io/4601 + // alpha: v1.31 + // + // Allows authorization to use field and label selectors. + AuthorizeWithSelectors featuregate.Feature = "AuthorizeWithSelectors" + + // owner: @serathius + // beta: v1.31 + // Enables concurrent watch object decoding to avoid starving watch cache when conversion webhook is installed. 
+ ConcurrentWatchObjectDecode featuregate.Feature = "ConcurrentWatchObjectDecode" + // owner: @cici37 @jpbetz // kep: http://kep.k8s.io/3488 // alpha: v1.26 @@ -108,14 +116,12 @@ const ( // Enables expression validation in Admission Control ValidatingAdmissionPolicy featuregate.Feature = "ValidatingAdmissionPolicy" - // owner: @cici37 - // kep: https://kep.k8s.io/2876 - // alpha: v1.23 - // beta: v1.25 - // stable: v1.29 + // owner: @jefftree + // kep: https://kep.k8s.io/4355 + // alpha: v1.31 // - // Enables expression validation for Custom Resource - CustomResourceValidationExpressions featuregate.Feature = "CustomResourceValidationExpressions" + // Enables coordinated leader election in the API server + CoordinatedLeaderElection featuregate.Feature = "CoordinatedLeaderElection" // alpha: v1.20 // beta: v1.21 @@ -173,6 +179,13 @@ const ( // to a chunking list request. RemainingItemCount featuregate.Feature = "RemainingItemCount" + // owner: @wojtek-t + // beta: v1.31 + // + // Enables resilient watchcache initialization to avoid controlplane + // overload. + ResilientWatchCacheInitialization featuregate.Feature = "ResilientWatchCacheInitialization" + // owner: @serathius // beta: v1.30 // @@ -276,6 +289,12 @@ const ( // Enables support for watch bookmark events. WatchBookmark featuregate.Feature = "WatchBookmark" + // owner: @wojtek-t + // beta: v1.31 + // + // Enables post-start-hook for storage readiness + WatchCacheInitializationPostStartHook featuregate.Feature = "WatchCacheInitializationPostStartHook" + // owner: @serathius // beta: 1.30 // Enables watches without resourceVersion to be served from storage. @@ -298,6 +317,7 @@ const ( // owner: @serathius // kep: http://kep.k8s.io/2340 // alpha: v1.28 + // beta: v1.31 // // Allow the API server to serve consistent lists from cache ConsistentListFromCache featuregate.Feature = "ConsistentListFromCache" @@ -314,6 +334,17 @@ const ( func init() { runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates)) + runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates)) +} + +// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs. +// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. +var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{ + // Example: + // EmulationVersion: { + // {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + // }, } // defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. @@ -321,25 +352,29 @@ func init() { // available throughout Kubernetes binaries. 
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ + AnonymousAuthConfigurableEndpoints: {Default: false, PreRelease: featuregate.Alpha}, + AggregatedDiscoveryEndpoint: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 AdmissionWebhookMatchConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - APIPriorityAndFairness: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, APIServerIdentity: {Default: true, PreRelease: featuregate.Beta}, APIServerTracing: {Default: true, PreRelease: featuregate.Beta}, - APIServingWithRoutine: {Default: true, PreRelease: featuregate.Beta}, + APIServingWithRoutine: {Default: false, PreRelease: featuregate.Alpha}, + + AuthorizeWithSelectors: {Default: false, PreRelease: featuregate.Alpha}, + + ConcurrentWatchObjectDecode: {Default: false, PreRelease: featuregate.Beta}, ValidatingAdmissionPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 + CoordinatedLeaderElection: {Default: false, PreRelease: featuregate.Alpha}, EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, @@ -353,7 +388,9 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS RemainingItemCount: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - RetryGenerateName: {Default: false, PreRelease: featuregate.Alpha}, + ResilientWatchCacheInitialization: {Default: true, PreRelease: featuregate.Beta}, + + RetryGenerateName: {Default: true, PreRelease: featuregate.Beta}, SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, @@ -377,13 +414,15 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + WatchCacheInitializationPostStartHook: {Default: false, PreRelease: featuregate.Beta}, + WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, WatchList: {Default: false, PreRelease: featuregate.Alpha}, - ConsistentListFromCache: {Default: false, PreRelease: featuregate.Alpha}, + ConsistentListFromCache: {Default: true, PreRelease: featuregate.Beta}, ZeroLimitedNominalConcurrencyShares: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/options.go b/vendor/k8s.io/apiserver/pkg/registry/generic/options.go index d675a258f5..44d07c0e24 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/options.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/options.go @@ -19,6 +19,7 @@ package generic import ( "time" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/storagebackend" @@ -39,12 +40,15 @@ type RESTOptions struct { } // Implement RESTOptionsGetter so that RESTOptions can directly be used when available (i.e. 
tests) -func (opts RESTOptions) GetRESTOptions(schema.GroupResource) (RESTOptions, error) { +func (opts RESTOptions) GetRESTOptions(schema.GroupResource, runtime.Object) (RESTOptions, error) { return opts, nil } type RESTOptionsGetter interface { - GetRESTOptions(resource schema.GroupResource) (RESTOptions, error) + // GetRESTOptions returns the RESTOptions for the given resource and example object. + // The example object is used to determine the storage version for the resource. + // If the example object is nil, the storage version will be determined by the resource's default storage version. + GetRESTOptions(resource schema.GroupResource, example runtime.Object) (RESTOptions, error) } // StoreOptions is set of configuration options used to complete generic registries. diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go index a8e01708a3..198c09f6b8 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -226,6 +226,10 @@ type Store struct { // storageVersionHash as empty in the discovery document. StorageVersioner runtime.GroupVersioner + // ReadinessCheckFunc checks if the storage is ready for accepting requests. + // The field is optional, if set needs to be thread-safe. + ReadinessCheckFunc func() error + // DestroyFunc cleans up clients used by the underlying Storage; optional. // If set, DestroyFunc has to be implemented in thread-safe way and // be prepared for being called more than once. @@ -234,6 +238,7 @@ type Store struct { // Note: the rest.StandardStorage interface aggregates the common REST verbs var _ rest.StandardStorage = &Store{} +var _ rest.StorageWithReadiness = &Store{} var _ rest.TableConvertor = &Store{} var _ GenericStore = &Store{} @@ -292,6 +297,14 @@ func (e *Store) New() runtime.Object { return e.NewFunc() } +// ReadinessCheck checks if the storage is ready for accepting requests. +func (e *Store) ReadinessCheck() error { + if e.ReadinessCheckFunc != nil { + return e.ReadinessCheckFunc() + } + return nil +} + // Destroy cleans up its resources on shutdown. func (e *Store) Destroy() { if e.DestroyFunc != nil { @@ -1518,7 +1531,7 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { return err } - opts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource) + opts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource, e.NewFunc()) if err != nil { return err } @@ -1614,6 +1627,9 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { } } } + if e.Storage.Storage != nil { + e.ReadinessCheckFunc = e.Storage.Storage.ReadinessCheck + } return nil } diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go b/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go index 78b6ea8b0e..03cea7bb70 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go @@ -52,6 +52,8 @@ import ( // Storage is a generic interface for RESTful storage services. // Resources which are exported to the RESTful API of apiserver need to implement this interface. It is expected // that objects may implement any of the below interfaces. +// +// Consider using StorageWithReadiness whenever possible. type Storage interface { // New returns an empty object that can be used with Create and Update after request data has been put into it. 
// This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) @@ -63,6 +65,14 @@ type Storage interface { Destroy() } +// StorageWithReadiness extends Storage interface with the readiness check. +type StorageWithReadiness interface { + Storage + + // ReadinessCheck allows for checking storage readiness. + ReadinessCheck() error +} + // Scoper indicates what scope the resource is at. It must be specified. // It is usually provided automatically based on your strategy. type Scoper interface { diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index 81a7d6ddb3..a326e7335a 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -32,9 +32,9 @@ import ( "sync/atomic" "time" - jsonpatch "github.com/evanphx/json-patch" "github.com/google/uuid" "golang.org/x/crypto/cryptobyte" + jsonpatch "gopkg.in/evanphx/json-patch.v4" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -42,8 +42,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/version" utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" - "k8s.io/apimachinery/pkg/version" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authentication/authenticator" @@ -64,14 +64,17 @@ import ( genericfilters "k8s.io/apiserver/pkg/server/filters" "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/routes" + "k8s.io/apiserver/pkg/server/routine" serverstore "k8s.io/apiserver/pkg/server/storage" storagevalue "k8s.io/apiserver/pkg/storage/value" "k8s.io/apiserver/pkg/storageversion" utilfeature "k8s.io/apiserver/pkg/util/feature" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" + utilversion "k8s.io/apiserver/pkg/util/version" "k8s.io/client-go/informers" restclient "k8s.io/client-go/rest" + "k8s.io/component-base/featuregate" "k8s.io/component-base/logs" "k8s.io/component-base/metrics/features" "k8s.io/component-base/metrics/prometheus/slis" @@ -147,8 +150,11 @@ type Config struct { // done values in this values for this map are ignored. PostStartHooks map[string]PostStartHookConfigEntry - // Version will enable the /version endpoint if non-nil - Version *version.Info + // EffectiveVersion determines which apis and features are available + // based on when the api/feature lifecyle. + EffectiveVersion utilversion.EffectiveVersion + // FeatureGate is a way to plumb feature gate through if you have them. + FeatureGate featuregate.FeatureGate // AuditBackend is where audit events are sent to. AuditBackend audit.Backend // AuditPolicyRuleEvaluator makes the decision of whether and how to audit log a request. @@ -211,6 +217,10 @@ type Config struct { // twice this value. Note that it is up to the request handlers to ignore or honor this timeout. In seconds. MinRequestTimeout int + // StorageInitializationTimeout defines the maximum amount of time to wait for storage initialization + // before declaring apiserver ready. + StorageInitializationTimeout time.Duration + // This represents the maximum amount of time it should take for apiserver to complete its startup // sequence and become healthy. 
From apiserver's start time to when this amount of time has // elapsed, /livez will assume that unfinished post-start hooks will complete successfully and @@ -421,6 +431,7 @@ func NewConfig(codecs serializer.CodecFactory) *Config { MaxMutatingRequestsInFlight: 200, RequestTimeout: time.Duration(60) * time.Second, MinRequestTimeout: 1800, + StorageInitializationTimeout: time.Minute, LivezGracePeriod: time.Duration(0), ShutdownDelayDuration: time.Duration(0), // 1.5MB is the default client request size in bytes @@ -585,7 +596,7 @@ func (c *Config) AddPostStartHookOrDie(name string, hook PostStartHookFunc) { } } -func completeOpenAPI(config *openapicommon.Config, version *version.Info) { +func completeOpenAPI(config *openapicommon.Config, version *version.Version) { if config == nil { return } @@ -624,7 +635,7 @@ func completeOpenAPI(config *openapicommon.Config, version *version.Info) { } } -func completeOpenAPIV3(config *openapicommon.OpenAPIV3Config, version *version.Info) { +func completeOpenAPIV3(config *openapicommon.OpenAPIV3Config, version *version.Version) { if config == nil { return } @@ -676,6 +687,9 @@ func (c *Config) ShutdownInitiatedNotify() <-chan struct{} { // Complete fills in any fields not set that are required to have valid data and can be derived // from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver. func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig { + if c.FeatureGate == nil { + c.FeatureGate = utilfeature.DefaultFeatureGate + } if len(c.ExternalAddress) == 0 && c.PublicAddress != nil { c.ExternalAddress = c.PublicAddress.String() } @@ -691,9 +705,8 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo } c.ExternalAddress = net.JoinHostPort(c.ExternalAddress, strconv.Itoa(port)) } - - completeOpenAPI(c.OpenAPIConfig, c.Version) - completeOpenAPIV3(c.OpenAPIV3Config, c.Version) + completeOpenAPI(c.OpenAPIConfig, c.EffectiveVersion.EmulationVersion()) + completeOpenAPIV3(c.OpenAPIV3Config, c.EffectiveVersion.EmulationVersion()) if c.DiscoveryAddresses == nil { c.DiscoveryAddresses = discovery.DefaultAddresses{DefaultAddress: c.ExternalAddress} @@ -711,7 +724,7 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo } else { c.EquivalentResourceRegistry = runtime.NewEquivalentResourceRegistryWithIdentity(func(groupResource schema.GroupResource) string { // use the storage prefix as the key if possible - if opts, err := c.RESTOptionsGetter.GetRESTOptions(groupResource); err == nil { + if opts, err := c.RESTOptionsGetter.GetRESTOptions(groupResource, nil); err == nil { return opts.ResourcePrefix } // otherwise return "" to use the default key (parent GV name) @@ -817,14 +830,16 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G ShutdownSendRetryAfter: c.ShutdownSendRetryAfter, APIServerID: c.APIServerID, + StorageReadinessHook: NewStorageReadinessHook(c.StorageInitializationTimeout), StorageVersionManager: c.StorageVersionManager, - Version: c.Version, + EffectiveVersion: c.EffectiveVersion, + FeatureGate: c.FeatureGate, muxAndDiscoveryCompleteSignals: map[string]<-chan struct{}{}, } - if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) { + if c.FeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) { manager := c.AggregatedDiscoveryGroupManager if manager == nil { manager = discoveryendpoint.NewResourceManager("apis") @@ -1039,15 +1054,15 @@ func 
DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { handler = genericfilters.WithRetryAfter(handler, c.lifecycleSignals.NotAcceptingNewRequest.Signaled()) } handler = genericfilters.WithHTTPLogging(handler) - if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) { + if c.FeatureGate.Enabled(genericfeatures.APIServerTracing) { handler = genericapifilters.WithTracing(handler, c.TracerProvider) } handler = genericapifilters.WithLatencyTrackers(handler) // WithRoutine will execute future handlers in a separate goroutine and serving // handler in current goroutine to minimize the stack memory usage. It must be // after WithPanicRecover() to be protected from panics. - if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServingWithRoutine) { - handler = genericfilters.WithRoutine(handler, c.LongRunningFunc) + if c.FeatureGate.Enabled(genericfeatures.APIServingWithRoutine) { + handler = routine.WithRoutine(handler, c.LongRunningFunc) } handler = genericapifilters.WithRequestInfo(handler, c.RequestInfoResolver) handler = genericapifilters.WithRequestReceivedTimestamp(handler) @@ -1087,10 +1102,10 @@ func installAPI(s *GenericAPIServer, c *Config) { } } - routes.Version{Version: c.Version}.Install(s.Handler.GoRestfulContainer) + routes.Version{Version: c.EffectiveVersion.BinaryVersion().Info()}.Install(s.Handler.GoRestfulContainer) if c.EnableDiscovery { - if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) { + if c.FeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) { wrapped := discoveryendpoint.WrapAggregatedDiscoveryToHandler(s.DiscoveryGroupManager, s.AggregatedDiscoveryGroupManager) s.Handler.GoRestfulContainer.Add(wrapped.GenerateWebService("/apis", metav1.APIGroupList{})) } else { diff --git a/vendor/k8s.io/apiserver/pkg/server/deleted_kinds.go b/vendor/k8s.io/apiserver/pkg/server/deleted_kinds.go index f14ed9213d..28cc0bf05d 100644 --- a/vendor/k8s.io/apiserver/pkg/server/deleted_kinds.go +++ b/vendor/k8s.io/apiserver/pkg/server/deleted_kinds.go @@ -17,6 +17,7 @@ limitations under the License. package server import ( + "fmt" "os" "strconv" "strings" @@ -25,16 +26,15 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" - apimachineryversion "k8s.io/apimachinery/pkg/version" + apimachineryversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/klog/v2" ) // resourceExpirationEvaluator holds info for deciding if a particular rest.Storage needs to excluded from the API type resourceExpirationEvaluator struct { - currentMajor int - currentMinor int - isAlpha bool + currentVersion *apimachineryversion.Version + isAlpha bool // This is usually set for testing for which tests need to be removed. This prevent insta-failing CI. 
// Set KUBE_APISERVER_STRICT_REMOVED_API_HANDLING_IN_ALPHA to see what will be removed when we tag beta strictRemovedHandlingInAlpha bool @@ -53,30 +53,17 @@ type ResourceExpirationEvaluator interface { ShouldServeForVersion(majorRemoved, minorRemoved int) bool } -func NewResourceExpirationEvaluator(currentVersion apimachineryversion.Info) (ResourceExpirationEvaluator, error) { +func NewResourceExpirationEvaluator(currentVersion *apimachineryversion.Version) (ResourceExpirationEvaluator, error) { + if currentVersion == nil { + return nil, fmt.Errorf("empty NewResourceExpirationEvaluator currentVersion") + } + klog.V(1).Infof("NewResourceExpirationEvaluator with currentVersion: %s.", currentVersion) ret := &resourceExpirationEvaluator{ strictRemovedHandlingInAlpha: false, } - if len(currentVersion.Major) > 0 { - currentMajor64, err := strconv.ParseInt(currentVersion.Major, 10, 32) - if err != nil { - return nil, err - } - ret.currentMajor = int(currentMajor64) - } - if len(currentVersion.Minor) > 0 { - // split the "normal" + and - for semver stuff - minorString := strings.Split(currentVersion.Minor, "+")[0] - minorString = strings.Split(minorString, "-")[0] - minorString = strings.Split(minorString, ".")[0] - currentMinor64, err := strconv.ParseInt(minorString, 10, 32) - if err != nil { - return nil, err - } - ret.currentMinor = int(currentMinor64) - } - - ret.isAlpha = strings.Contains(currentVersion.GitVersion, "alpha") + // Only keeps the major and minor versions from input version. + ret.currentVersion = apimachineryversion.MajorMinor(currentVersion.Major(), currentVersion.Minor()) + ret.isAlpha = strings.Contains(currentVersion.PreRelease(), "alpha") if envString, ok := os.LookupEnv("KUBE_APISERVER_STRICT_REMOVED_API_HANDLING_IN_ALPHA"); !ok { // do nothing @@ -112,6 +99,15 @@ func (e *resourceExpirationEvaluator) shouldServe(gv schema.GroupVersion, versio return false } + introduced, ok := versionedPtr.(introducedInterface) + if ok { + majorIntroduced, minorIntroduced := introduced.APILifecycleIntroduced() + verIntroduced := apimachineryversion.MajorMinor(uint(majorIntroduced), uint(minorIntroduced)) + if e.currentVersion.LessThan(verIntroduced) { + return false + } + } + removed, ok := versionedPtr.(removedInterface) if !ok { return true @@ -121,16 +117,11 @@ func (e *resourceExpirationEvaluator) shouldServe(gv schema.GroupVersion, versio } func (e *resourceExpirationEvaluator) ShouldServeForVersion(majorRemoved, minorRemoved int) bool { - if e.currentMajor < majorRemoved { + removedVer := apimachineryversion.MajorMinor(uint(majorRemoved), uint(minorRemoved)) + if removedVer.GreaterThan(e.currentVersion) { return true } - if e.currentMajor > majorRemoved { - return false - } - if e.currentMinor < minorRemoved { - return true - } - if e.currentMinor > minorRemoved { + if removedVer.LessThan(e.currentVersion) { return false } // at this point major and minor are equal, so this API should be removed when the current release GAs. @@ -152,6 +143,11 @@ type removedInterface interface { APILifecycleRemoved() (major, minor int) } +// Object interface generated from "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +type introducedInterface interface { + APILifecycleIntroduced() (major, minor int) +} + // removeDeletedKinds inspects the storage map and modifies it in place by removing storage for kinds that have been deleted. // versionedResourcesStorageMap mirrors the field on APIGroupInfo, it's a map from version to resource to the storage. 
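// Illustrative sketch, not part of the vendored change: the lifecycle hooks the evaluator
// above consults are normally generated from k8s:prerelease-lifecycle-gen tags. A hypothetical
// type introduced in 1.29 and removed in 1.33 would satisfy both interfaces:
//
//	func (in *WidgetList) APILifecycleIntroduced() (major, minor int) { return 1, 29 }
//	func (in *WidgetList) APILifecycleRemoved() (major, minor int)    { return 1, 33 }
//
// With currentVersion 1.31 the resource is served (1.29 <= 1.31 < 1.33); once the current
// version reaches 1.33 its storage is dropped and, with this change, Destroy() is called on it.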
func (e *resourceExpirationEvaluator) RemoveDeletedKinds(groupName string, versioner runtime.ObjectVersioner, versionedResourcesStorageMap map[string]map[string]rest.Storage) { @@ -171,6 +167,8 @@ func (e *resourceExpirationEvaluator) RemoveDeletedKinds(groupName string, versi } klog.V(1).Infof("Removing resource %v.%v.%v because it is time to stop serving it per APILifecycle.", resourceName, apiVersion, groupName) + storage := versionToResource[resourceName] + storage.Destroy() delete(versionToResource, resourceName) } versionedResourcesStorageMap[apiVersion] = versionToResource diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go index 428fd66bae..845a45fab3 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go @@ -54,7 +54,7 @@ type ConfigMapCAController struct { listeners []Listener - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // preRunCaches are the caches to sync before starting the work of this control loop preRunCaches []cache.InformerSynced } @@ -94,7 +94,10 @@ func NewDynamicCAFromConfigMapController(purpose, namespace, name, key string, k configmapLister: configmapLister, configMapInformer: uncastConfigmapInformer, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)}, + ), preRunCaches: []cache.InformerSynced{uncastConfigmapInformer.HasSynced}, } diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go index 75bc49e993..1f32adf9e4 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go @@ -60,7 +60,7 @@ type DynamicFileCAContent struct { listeners []Listener // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } var _ Notifier = &DynamicFileCAContent{} @@ -82,7 +82,10 @@ func NewDynamicCAContentFromFile(purpose, filename string) (*DynamicFileCAConten ret := &DynamicFileCAContent{ name: name, filename: filename, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicCABundle-%s", purpose)}, + ), } if err := ret.loadCABundle(); err != nil { return nil, err diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go index 62aef4992c..e0dd0474b1 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go @@ -47,7 
+47,7 @@ type DynamicCertKeyPairContent struct { listeners []Listener // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } var _ CertKeyContentProvider = &DynamicCertKeyPairContent{} @@ -64,7 +64,10 @@ func NewDynamicServingContentFromFiles(purpose, certFile, keyFile string) (*Dyna name: name, certFile: certFile, keyFile: keyFile, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicCABundle-%s", purpose)}, + ), } if err := ret.loadCertKeyPair(); err != nil { return nil, err diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go index 56e7ffd275..61bd6fded6 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go @@ -56,7 +56,7 @@ type DynamicServingCertificateController struct { currentServingTLSConfig atomic.Value // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] eventRecorder events.EventRecorder } @@ -76,7 +76,10 @@ func NewDynamicServingCertificateController( servingCert: servingCert, sniCerts: sniCerts, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicServingCertificateController"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicServingCertificateController"}, + ), eventRecorder: eventRecorder, } diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go index 432851a631..e943873221 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go @@ -137,9 +137,7 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } metrics.RecordRequestPostTimeout(metrics.PostTimeoutSourceTimeoutHandler, status) - err := fmt.Errorf("post-timeout activity - time-elapsed: %s, %v %q result: %v", - time.Since(timedOutAt), r.Method, r.URL.Path, res) - utilruntime.HandleError(err) + utilruntime.HandleErrorWithContext(r.Context(), nil, "Post-timeout activity", "timeElapsed", time.Since(timedOutAt), "method", r.Method, "path", r.URL.Path, "result", res) }() }() httplog.SetStacktracePredicate(r.Context(), func(status int) bool { diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go b/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go index a981eae78a..73ce270260 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go @@ -17,7 +17,6 @@ limitations under the License. package filters import ( - "fmt" "net/http" "k8s.io/apimachinery/pkg/util/runtime" @@ -51,7 +50,7 @@ func WithPanicRecovery(handler http.Handler, resolver request.RequestInfoResolve // This call can have different handlers, but the default chain rate limits. Call it after the metrics are updated // in case the rate limit delays it. 
If you outrun the rate for this one timed out requests, something has gone // seriously wrong with your server, but generally having a logging signal for timeouts is useful. - runtime.HandleError(fmt.Errorf("timeout or abort while handling: method=%v URI=%q audit-ID=%q", req.Method, req.RequestURI, audit.GetAuditIDTruncated(req.Context()))) + runtime.HandleErrorWithContext(req.Context(), nil, "Timeout or abort while handling", "method", req.Method, "URI", req.RequestURI, "auditID", audit.GetAuditIDTruncated(req.Context())) return } http.Error(w, "This request caused apiserver to panic. Look in the logs for details.", http.StatusInternalServerError) diff --git a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go index 6066c0fcba..52fb449ac6 100644 --- a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -38,8 +38,8 @@ import ( "k8s.io/apimachinery/pkg/util/managedfields" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" - "k8s.io/apimachinery/pkg/version" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authorization/authorizer" @@ -51,8 +51,9 @@ import ( "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/routes" "k8s.io/apiserver/pkg/storageversion" - utilfeature "k8s.io/apiserver/pkg/util/feature" + utilversion "k8s.io/apiserver/pkg/util/version" restclient "k8s.io/client-go/rest" + "k8s.io/component-base/featuregate" "k8s.io/klog/v2" openapibuilder3 "k8s.io/kube-openapi/pkg/builder3" openapicommon "k8s.io/kube-openapi/pkg/common" @@ -232,11 +233,18 @@ type GenericAPIServer struct { // APIServerID is the ID of this API server APIServerID string + // StorageReadinessHook implements post-start-hook functionality for checking readiness + // of underlying storage for registered resources. + StorageReadinessHook *StorageReadinessHook + // StorageVersionManager holds the storage versions of the API resources installed by this server. StorageVersionManager storageversion.Manager - // Version will enable the /version endpoint if non-nil - Version *version.Info + // EffectiveVersion determines which apis and features are available + // based on when the api/feature lifecyle. + EffectiveVersion utilversion.EffectiveVersion + // FeatureGate is a way to plumb feature gate through if you have them. + FeatureGate featuregate.FeatureGate // lifecycleSignals provides access to the various signals that happen during the life cycle of the apiserver. lifecycleSignals lifecycleSignals @@ -442,9 +450,19 @@ func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { // Run spawns the secure http server. It only returns if stopCh is closed // or the secure port cannot be listened on initially. -// This is the diagram of what channels/signals are dependent on each other: // -// | stopCh +// Deprecated: use RunWithContext instead. Run will not get removed to avoid +// breaking consumers, but should not be used in new code. +func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { + ctx := wait.ContextForChannel(stopCh) + return s.RunWithContext(ctx) +} + +// RunWithContext spawns the secure http server. It only returns if ctx is canceled +// or the secure port cannot be listened on initially. 
+// This is the diagram of what contexts/channels/signals are dependent on each other: +// +// | ctx // | | // | --------------------------------------------------------- // | | | @@ -477,12 +495,13 @@ func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { // | | | | // | |-------------------|---------------------|----------------------------------------| // | | | -// | stopHttpServerCh (AuditBackend::Shutdown()) +// | stopHttpServerCtx (AuditBackend::Shutdown()) // | | // | listenerStoppedCh // | | // | HTTPServerStoppedListening (httpServerStoppedListeningCh) -func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { +func (s preparedGenericAPIServer) RunWithContext(ctx context.Context) error { + stopCh := ctx.Done() delayedStopCh := s.lifecycleSignals.AfterShutdownDelayDuration shutdownInitiatedCh := s.lifecycleSignals.ShutdownInitiated @@ -544,9 +563,11 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { notAcceptingNewRequestCh := s.lifecycleSignals.NotAcceptingNewRequest drainedCh := s.lifecycleSignals.InFlightRequestsDrained - stopHttpServerCh := make(chan struct{}) + // Canceling the parent context does not immediately cancel the HTTP server. + // We only inherit context values here and deal with cancellation ourselves. + stopHTTPServerCtx, stopHTTPServer := context.WithCancelCause(context.WithoutCancel(ctx)) go func() { - defer close(stopHttpServerCh) + defer stopHTTPServer(errors.New("time to stop HTTP server")) timeToStopHttpServerCh := notAcceptingNewRequestCh.Signaled() if s.ShutdownSendRetryAfter { @@ -565,7 +586,7 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { } } - stoppedCh, listenerStoppedCh, err := s.NonBlockingRun(stopHttpServerCh, shutdownTimeout) + stoppedCh, listenerStoppedCh, err := s.NonBlockingRunWithContext(stopHTTPServerCtx, shutdownTimeout) if err != nil { return err } @@ -694,7 +715,18 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { // NonBlockingRun spawns the secure http server. An error is // returned if the secure port cannot be listened on. // The returned channel is closed when the (asynchronous) termination is finished. +// +// Deprecated: use RunWithContext instead. Run will not get removed to avoid +// breaking consumers, but should not be used in new code. func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}, shutdownTimeout time.Duration) (<-chan struct{}, <-chan struct{}, error) { + ctx := wait.ContextForChannel(stopCh) + return s.NonBlockingRunWithContext(ctx, shutdownTimeout) +} + +// NonBlockingRunWithContext spawns the secure http server. An error is +// returned if the secure port cannot be listened on. +// The returned channel is closed when the (asynchronous) termination is finished. +func (s preparedGenericAPIServer) NonBlockingRunWithContext(ctx context.Context, shutdownTimeout time.Duration) (<-chan struct{}, <-chan struct{}, error) { // Use an internal stop channel to allow cleanup of the listeners on error. internalStopCh := make(chan struct{}) var stoppedCh <-chan struct{} @@ -712,11 +744,11 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}, shutdow // responsibility of the caller to close the provided channel to // ensure cleanup. 
go func() { - <-stopCh + <-ctx.Done() close(internalStopCh) }() - s.RunPostStartHooks(stopCh) + s.RunPostStartHooks(ctx) if _, err := systemd.SdNotify(true, "READY=1\n"); err != nil { klog.Errorf("Unable to send systemd daemon successful start message: %v\n", err) @@ -751,7 +783,7 @@ func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *A } resourceInfos = append(resourceInfos, r...) - if utilfeature.DefaultFeatureGate.Enabled(features.AggregatedDiscoveryEndpoint) { + if s.FeatureGate.Enabled(features.AggregatedDiscoveryEndpoint) { // Aggregated discovery only aggregates resources under /apis if apiPrefix == APIGroupPrefix { s.AggregatedDiscoveryGroupManager.AddGroupVersion( @@ -779,8 +811,8 @@ func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *A s.RegisterDestroyFunc(apiGroupInfo.destroyStorage) - if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionAPI) && - utilfeature.DefaultFeatureGate.Enabled(features.APIServerIdentity) { + if s.FeatureGate.Enabled(features.StorageVersionAPI) && + s.FeatureGate.Enabled(features.APIServerIdentity) { // API installation happens before we start listening on the handlers, // therefore it is safe to register ResourceInfos here. The handler will block // write requests until the storage versions of the targeting resources are updated. @@ -810,12 +842,13 @@ func (s *GenericAPIServer) InstallLegacyAPIGroup(apiPrefix string, apiGroupInfo // Install the version handler. // Add a handler at / to enumerate the supported api versions. legacyRootAPIHandler := discovery.NewLegacyRootAPIHandler(s.discoveryAddresses, s.Serializer, apiPrefix) - if utilfeature.DefaultFeatureGate.Enabled(features.AggregatedDiscoveryEndpoint) { + if s.FeatureGate.Enabled(features.AggregatedDiscoveryEndpoint) { wrapped := discoveryendpoint.WrapAggregatedDiscoveryToHandler(legacyRootAPIHandler, s.AggregatedLegacyDiscoveryGroupManager) s.Handler.GoRestfulContainer.Add(wrapped.GenerateWebService("/api", metav1.APIVersions{})) } else { s.Handler.GoRestfulContainer.Add(legacyRootAPIHandler.WebService()) } + s.registerStorageReadinessCheck("", apiGroupInfo) return nil } @@ -874,10 +907,28 @@ func (s *GenericAPIServer) InstallAPIGroups(apiGroupInfos ...*APIGroupInfo) erro s.DiscoveryGroupManager.AddGroup(apiGroup) s.Handler.GoRestfulContainer.Add(discovery.NewAPIGroupHandler(s.Serializer, apiGroup).WebService()) + s.registerStorageReadinessCheck(apiGroupInfo.PrioritizedVersions[0].Group, apiGroupInfo) } return nil } +// registerStorageReadinessCheck registers the readiness checks for all underlying storages +// for a given APIGroup. +func (s *GenericAPIServer) registerStorageReadinessCheck(groupName string, apiGroupInfo *APIGroupInfo) { + for version, storageMap := range apiGroupInfo.VersionedResourcesStorageMap { + for resource, storage := range storageMap { + if withReadiness, ok := storage.(rest.StorageWithReadiness); ok { + gvr := metav1.GroupVersionResource{ + Group: groupName, + Version: version, + Resource: resource, + } + s.StorageReadinessHook.RegisterStorage(gvr, withReadiness) + } + } + } +} + // InstallAPIGroup exposes the given api group in the API. // The passed into this function shouldn't be used elsewhere as the // underlying storage will be destroyed on this servers shutdown. 
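// Illustrative migration sketch, not part of the vendored change: callers of the now
// deprecated Run(stopCh) can switch to RunWithContext introduced above. "prepared" stands
// for a preparedGenericAPIServer obtained from PrepareRun(); the signal wiring is an assumption.
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer cancel()

// Before: err := prepared.Run(ctx.Done())
if err := prepared.RunWithContext(ctx); err != nil {
	klog.Fatalf("apiserver exited: %v", err)
}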
diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz.go index f4b564c48f..96da308e8c 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz.go +++ b/vendor/k8s.io/apiserver/pkg/server/healthz.go @@ -118,7 +118,7 @@ func (s *GenericAPIServer) AddLivezChecks(delay time.Duration, checks ...healthz // that we can register that the api-server is no longer ready while we attempt to gracefully // shutdown. func (s *GenericAPIServer) addReadyzShutdownCheck(stopCh <-chan struct{}) error { - return s.AddReadyzChecks(shutdownCheck{stopCh}) + return s.AddReadyzChecks(healthz.NewShutdownHealthz(stopCh)) } // installHealthz creates the healthz endpoint for this server @@ -139,25 +139,6 @@ func (s *GenericAPIServer) installLivez() { s.livezRegistry.installHandler(s.Handler.NonGoRestfulMux) } -// shutdownCheck fails if the embedded channel is closed. This is intended to allow for graceful shutdown sequences -// for the apiserver. -type shutdownCheck struct { - StopCh <-chan struct{} -} - -func (shutdownCheck) Name() string { - return "shutdown" -} - -func (c shutdownCheck) Check(req *http.Request) error { - select { - case <-c.StopCh: - return fmt.Errorf("process is shutting down") - default: - } - return nil -} - // delayedHealthCheck wraps a health check which will not fail until the explicitly defined delay has elapsed. This // is intended for use primarily for livez health checks. func delayedHealthCheck(check healthz.HealthChecker, clock clock.Clock, delay time.Duration) healthz.HealthChecker { diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go index 4613fd6d17..76f5745b33 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -105,6 +105,29 @@ func (i *informerSync) Name() string { return "informer-sync" } +type shutdown struct { + stopCh <-chan struct{} +} + +// NewShutdownHealthz returns a new HealthChecker that will fail if the embedded channel is closed. +// This is intended to allow for graceful shutdown sequences. +func NewShutdownHealthz(stopCh <-chan struct{}) HealthChecker { + return &shutdown{stopCh} +} + +func (s *shutdown) Name() string { + return "shutdown" +} + +func (s *shutdown) Check(req *http.Request) error { + select { + case <-s.stopCh: + return fmt.Errorf("process is shutting down") + default: + } + return nil +} + func (i *informerSync) Check(_ *http.Request) error { stopCh := make(chan struct{}) // Close stopCh to force checking if informers are synced now. diff --git a/vendor/k8s.io/apiserver/pkg/server/hooks.go b/vendor/k8s.io/apiserver/pkg/server/hooks.go index 065df6bc5c..1561d7a847 100644 --- a/vendor/k8s.io/apiserver/pkg/server/hooks.go +++ b/vendor/k8s.io/apiserver/pkg/server/hooks.go @@ -17,6 +17,7 @@ limitations under the License. package server import ( + "context" "errors" "fmt" "net/http" @@ -48,8 +49,13 @@ type PreShutdownHookFunc func() error type PostStartHookContext struct { // LoopbackClientConfig is a config for a privileged loopback connection to the API server LoopbackClientConfig *restclient.Config - // StopCh is the channel that will be closed when the server stops + // StopCh is the channel that will be closed when the server stops. + // + // Deprecated: use the PostStartHookContext itself instead, it contains a context that + // gets cancelled when the server stops. StopCh keeps getting provided for existing code. 
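// Illustrative sketch, not part of the vendored change: post-start hooks can now treat the
// hook context as a context.Context rather than selecting on StopCh. The hook name and log
// line are assumptions.
//
//	s.AddPostStartHookOrDie("example-warmup", func(hookContext server.PostStartHookContext) error {
//		go func() {
//			<-hookContext.Done() // PostStartHookContext now satisfies context.Context
//			klog.Info("server is shutting down, stopping warmup")
//		}()
//		return nil
//	})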
StopCh <-chan struct{} + // Context gets cancelled when the server stops. + context.Context } // PostStartHookProvider is an interface in addition to provide a post start hook for the api server @@ -151,15 +157,16 @@ func (s *GenericAPIServer) AddPreShutdownHookOrDie(name string, hook PreShutdown } } -// RunPostStartHooks runs the PostStartHooks for the server -func (s *GenericAPIServer) RunPostStartHooks(stopCh <-chan struct{}) { +// RunPostStartHooks runs the PostStartHooks for the server. +func (s *GenericAPIServer) RunPostStartHooks(ctx context.Context) { s.postStartHookLock.Lock() defer s.postStartHookLock.Unlock() s.postStartHooksCalled = true context := PostStartHookContext{ LoopbackClientConfig: s.LoopbackClientConfig, - StopCh: stopCh, + StopCh: ctx.Done(), + Context: ctx, } for hookName, hookEntry := range s.postStartHooks { diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go index a4b4c5899f..c64f5771d8 100644 --- a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go +++ b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go @@ -31,6 +31,7 @@ import ( "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/endpoints/responsewriter" + "k8s.io/apiserver/pkg/server/routine" "k8s.io/klog/v2" ) @@ -125,10 +126,26 @@ func withLogging(handler http.Handler, stackTracePred StacktracePred, shouldLogR rl := newLoggedWithStartTime(req, w, startTime) rl.StacktraceWhen(stackTracePred) req = req.WithContext(context.WithValue(ctx, respLoggerContextKey, rl)) - defer rl.Log() + + var logFunc func() + logFunc = rl.Log + defer func() { + if logFunc != nil { + logFunc() + } + }() w = responsewriter.WrapForHTTP1Or2(rl) handler.ServeHTTP(w, req) + + // We need to ensure that the request is logged after it is processed. + // In case the request is executed in a separate goroutine created via + // WithRoutine handler in the handler chain (i.e. above handler.ServeHTTP() + // would return request is completely responsed), we want the logging to + // happen in that goroutine too, so we append it to the task. + if routine.AppendTask(ctx, &routine.Task{Func: rl.Log}) { + logFunc = nil + } }) } diff --git a/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go b/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go index beb7a8d807..3ed92d96c6 100644 --- a/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go +++ b/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go @@ -96,9 +96,11 @@ func NewPathRecorderMux(name string) *PathRecorderMux { // ListedPaths returns the registered handler exposedPaths. func (m *PathRecorderMux) ListedPaths() []string { + m.lock.Lock() handledPaths := append([]string{}, m.exposedPaths...) 
- sort.Strings(handledPaths) + m.lock.Unlock() + sort.Strings(handledPaths) return handledPaths } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/admission.go b/vendor/k8s.io/apiserver/pkg/server/options/admission.go index 542c81a869..61085e94eb 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/admission.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/admission.go @@ -59,7 +59,7 @@ type AdmissionOptions struct { // RecommendedPluginOrder holds an ordered list of plugin names we recommend to use by default RecommendedPluginOrder []string // DefaultOffPlugins is a set of plugin names that is disabled by default - DefaultOffPlugins sets.String + DefaultOffPlugins sets.Set[string] // EnablePlugins indicates plugins to be enabled passed through `--enable-admission-plugins`. EnablePlugins []string @@ -91,7 +91,7 @@ func NewAdmissionOptions() *AdmissionOptions { // after all the mutating ones, so their relative order in this list // doesn't matter. RecommendedPluginOrder: []string{lifecycle.PluginName, mutatingwebhook.PluginName, validatingadmissionpolicy.PluginName, validatingwebhook.PluginName}, - DefaultOffPlugins: sets.NewString(), + DefaultOffPlugins: sets.Set[string]{}, } server.RegisterAllAdmissionPlugins(options.Plugins) return options @@ -139,6 +139,9 @@ func (a *AdmissionOptions) ApplyTo( if informers == nil { return fmt.Errorf("admission depends on a Kubernetes core API shared informer, it cannot be nil") } + if kubeClient == nil || dynamicClient == nil { + return fmt.Errorf("admission depends on a Kubernetes core API client, it cannot be nil") + } pluginNames := a.enabledPluginNames() @@ -223,7 +226,7 @@ func (a *AdmissionOptions) Validate() []error { // EnablePlugins, DisablePlugins fields // to prepare a list of ordered plugin names that are enabled. func (a *AdmissionOptions) enabledPluginNames() []string { - allOffPlugins := append(a.DefaultOffPlugins.List(), a.DisablePlugins...) + allOffPlugins := append(sets.List[string](a.DefaultOffPlugins), a.DisablePlugins...) disabledPlugins := sets.NewString(allOffPlugins...) enabledPlugins := sets.NewString(a.EnablePlugins...) disabledPlugins = disabledPlugins.Difference(enabledPlugins) diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authentication.go b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go index e9a61d30b9..c40e4cf434 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/authentication.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/apis/apiserver" "k8s.io/apiserver/pkg/authentication/authenticatorfactory" "k8s.io/apiserver/pkg/authentication/request/headerrequest" "k8s.io/apiserver/pkg/server" @@ -222,8 +223,8 @@ type DelegatingAuthenticationOptions struct { // CustomRoundTripperFn allows for specifying a middleware function for custom HTTP behaviour for the authentication webhook client. CustomRoundTripperFn transport.WrapperFunc - // DisableAnonymous gives user an option to disable Anonymous authentication. - DisableAnonymous bool + // Anonymous gives user an option to enable/disable Anonymous authentication. 
+ Anonymous *apiserver.AnonymousAuthConfig } func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions { @@ -238,6 +239,7 @@ func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions { }, WebhookRetryBackoff: DefaultAuthWebhookRetryBackoff(), TokenRequestTimeout: 10 * time.Second, + Anonymous: &apiserver.AnonymousAuthConfig{Enabled: true}, } } @@ -305,7 +307,7 @@ func (s *DelegatingAuthenticationOptions) ApplyTo(authenticationInfo *server.Aut } cfg := authenticatorfactory.DelegatingAuthenticatorConfig{ - Anonymous: !s.DisableAnonymous, + Anonymous: &apiserver.AnonymousAuthConfig{Enabled: true}, CacheTTL: s.CacheTTL, WebhookRetryBackoff: s.WebhookRetryBackoff, TokenAccessReviewTimeout: s.TokenRequestTimeout, diff --git a/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go b/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go deleted file mode 100644 index dd05bfecdb..0000000000 --- a/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package options - -import ( - "fmt" - "net" - - "github.com/spf13/pflag" - - "k8s.io/apiserver/pkg/server" -) - -// DeprecatedInsecureServingOptions are for creating an unauthenticated, unauthorized, insecure port. -// No one should be using these anymore. -// DEPRECATED: all insecure serving options are removed in a future version -type DeprecatedInsecureServingOptions struct { - BindAddress net.IP - BindPort int - // BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp", - // "tcp4", and "tcp6". - BindNetwork string - - // Listener is the secure server network listener. - // either Listener or BindAddress/BindPort/BindNetwork is set, - // if Listener is set, use it and omit BindAddress/BindPort/BindNetwork. - Listener net.Listener - - // ListenFunc can be overridden to create a custom listener, e.g. for mocking in tests. - // It defaults to options.CreateListener. - ListenFunc func(network, addr string, config net.ListenConfig) (net.Listener, int, error) -} - -// Validate ensures that the insecure port values within the range of the port. -func (s *DeprecatedInsecureServingOptions) Validate() []error { - if s == nil { - return nil - } - - errors := []error{} - - if s.BindPort < 0 || s.BindPort > 65535 { - errors = append(errors, fmt.Errorf("insecure port %v must be between 0 and 65535, inclusive. 0 for turning off insecure (HTTP) port", s.BindPort)) - } - - return errors -} - -// AddFlags adds flags related to insecure serving to the specified FlagSet. 
-func (s *DeprecatedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) { - if s == nil { - return - } - - fs.IPVar(&s.BindAddress, "insecure-bind-address", s.BindAddress, ""+ - "The IP address on which to serve the --insecure-port (set to 0.0.0.0 or :: for listening on all interfaces and IP address families).") - // Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784 - fs.MarkDeprecated("insecure-bind-address", "This flag will be removed in a future version.") - fs.Lookup("insecure-bind-address").Hidden = false - - fs.IntVar(&s.BindPort, "insecure-port", s.BindPort, ""+ - "The port on which to serve unsecured, unauthenticated access.") - // Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784 - fs.MarkDeprecated("insecure-port", "This flag will be removed in a future version.") - fs.Lookup("insecure-port").Hidden = false -} - -// AddUnqualifiedFlags adds flags related to insecure serving without the --insecure prefix to the specified FlagSet. -func (s *DeprecatedInsecureServingOptions) AddUnqualifiedFlags(fs *pflag.FlagSet) { - if s == nil { - return - } - - fs.IPVar(&s.BindAddress, "address", s.BindAddress, - "The IP address on which to serve the insecure --port (set to '0.0.0.0' or '::' for listening on all interfaces and IP address families).") - fs.MarkDeprecated("address", "see --bind-address instead.") - fs.Lookup("address").Hidden = false - - fs.IntVar(&s.BindPort, "port", s.BindPort, "The port on which to serve unsecured, unauthenticated access. Set to 0 to disable.") - fs.MarkDeprecated("port", "see --secure-port instead.") - fs.Lookup("port").Hidden = false -} - -// ApplyTo adds DeprecatedInsecureServingOptions to the insecureserverinfo and kube-controller manager configuration. -// Note: the double pointer allows to set the *DeprecatedInsecureServingInfo to nil without referencing the struct hosting this pointer. 
-func (s *DeprecatedInsecureServingOptions) ApplyTo(c **server.DeprecatedInsecureServingInfo) error { - if s == nil { - return nil - } - if s.BindPort <= 0 { - return nil - } - - if s.Listener == nil { - var err error - listen := CreateListener - if s.ListenFunc != nil { - listen = s.ListenFunc - } - addr := net.JoinHostPort(s.BindAddress.String(), fmt.Sprintf("%d", s.BindPort)) - s.Listener, s.BindPort, err = listen(s.BindNetwork, addr, net.ListenConfig{}) - if err != nil { - return fmt.Errorf("failed to create listener: %v", err) - } - } - - *c = &server.DeprecatedInsecureServingInfo{ - Listener: s.Listener, - } - - return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go index 2dbf335b2b..40879f78e0 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -130,7 +130,7 @@ func init() { configScheme := runtime.NewScheme() utilruntime.Must(apiserver.AddToScheme(configScheme)) utilruntime.Must(apiserverv1.AddToScheme(configScheme)) - codecs = serializer.NewCodecFactory(configScheme) + codecs = serializer.NewCodecFactory(configScheme, serializer.EnableStrict) envelopemetrics.RegisterMetrics() storagevalue.RegisterMetrics() metrics.RegisterMetrics() diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go index cde6a379ec..fd783f41a4 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go @@ -49,7 +49,7 @@ type DynamicEncryptionConfigContent struct { lastLoadedEncryptionConfigHash string // queue for processing changes in encryption config file. - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // dynamicTransformers updates the transformers when encryption config file changes. 
dynamicTransformers *encryptionconfig.DynamicTransformers @@ -78,8 +78,11 @@ func NewDynamicEncryptionConfiguration( filePath: filePath, lastLoadedEncryptionConfigHash: configContentHash, dynamicTransformers: dynamicTransformers, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), - apiServerID: apiServerID, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: name}, + ), + apiServerID: apiServerID, getEncryptionConfigHash: func(_ context.Context, filepath string) (string, error) { return encryptionconfig.GetEncryptionConfigHash(filepath) }, @@ -150,7 +153,7 @@ func (d *DynamicEncryptionConfigContent) processNextWorkItem(serverCtx context.C return true } -func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Context, workqueueKey interface{}) { +func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Context, workqueueKey string) { var ( updatedEffectiveConfig bool err error diff --git a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go index a1fc3168c5..af7696c40a 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go @@ -383,8 +383,8 @@ type StorageFactoryRestOptionsFactory struct { StorageFactory serverstorage.StorageFactory } -func (f *StorageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) { - storageConfig, err := f.StorageFactory.NewConfig(resource) +func (f *StorageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupResource, example runtime.Object) (generic.RESTOptions, error) { + storageConfig, err := f.StorageFactory.NewConfig(resource, example) if err != nil { return generic.RESTOptions{}, fmt.Errorf("unable to find storage destination for %v, due to %v", resource, err.Error()) } @@ -399,6 +399,10 @@ func (f *StorageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupR StorageObjectCountTracker: f.Options.StorageConfig.StorageObjectCountTracker, } + if ret.StorageObjectCountTracker == nil { + ret.StorageObjectCountTracker = storageConfig.StorageObjectCountTracker + } + if f.Options.EnableWatchCache { sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) if err != nil { @@ -465,7 +469,7 @@ type SimpleStorageFactory struct { StorageConfig storagebackend.Config } -func (s *SimpleStorageFactory) NewConfig(resource schema.GroupResource) (*storagebackend.ConfigForResource, error) { +func (s *SimpleStorageFactory) NewConfig(resource schema.GroupResource, example runtime.Object) (*storagebackend.ConfigForResource, error) { return s.StorageConfig.ForResource(resource), nil } @@ -489,8 +493,8 @@ type transformerStorageFactory struct { resourceTransformers storagevalue.ResourceTransformers } -func (t *transformerStorageFactory) NewConfig(resource schema.GroupResource) (*storagebackend.ConfigForResource, error) { - config, err := t.delegate.NewConfig(resource) +func (t *transformerStorageFactory) NewConfig(resource schema.GroupResource, example runtime.Object) (*storagebackend.ConfigForResource, error) { + config, err := t.delegate.NewConfig(resource, example) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/feature.go b/vendor/k8s.io/apiserver/pkg/server/options/feature.go index f01195560a..a5b1372da4 100644 --- 
a/vendor/k8s.io/apiserver/pkg/server/options/feature.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/feature.go @@ -71,6 +71,9 @@ func (o *FeatureOptions) ApplyTo(c *server.Config, clientset kubernetes.Interfac c.EnableContentionProfiling = o.EnableContentionProfiling if o.EnablePriorityAndFairness { + if clientset == nil { + return fmt.Errorf("invalid configuration: priority and fairness requires a core Kubernetes client") + } if c.MaxRequestsInFlight+c.MaxMutatingRequestsInFlight <= 0 { return fmt.Errorf("invalid configuration: MaxRequestsInFlight=%d and MaxMutatingRequestsInFlight=%d; they must add up to something positive", c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight) diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go index eb7e67b367..779c4e3da9 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -120,9 +120,18 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { if err := o.CoreAPI.ApplyTo(config); err != nil { return err } - kubeClient, err := kubernetes.NewForConfig(config.ClientConfig) - if err != nil { - return err + var kubeClient *kubernetes.Clientset + var dynamicClient *dynamic.DynamicClient + if config.ClientConfig != nil { + var err error + kubeClient, err = kubernetes.NewForConfig(config.ClientConfig) + if err != nil { + return err + } + dynamicClient, err = dynamic.NewForConfig(config.ClientConfig) + if err != nil { + return err + } } if err := o.Features.ApplyTo(&config.Config, kubeClient, config.SharedInformerFactory); err != nil { return err @@ -131,10 +140,6 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { if err != nil { return err } - dynamicClient, err := dynamic.NewForConfig(config.ClientConfig) - if err != nil { - return err - } if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, kubeClient, dynamicClient, o.FeatureGate, initializers...); err != nil { return err diff --git a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go index 1373d8a4d7..9246366dd6 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -25,8 +25,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/server" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilversion "k8s.io/apiserver/pkg/util/version" "github.com/spf13/pflag" ) @@ -45,16 +47,17 @@ const ( type ServerRunOptions struct { AdvertiseAddress net.IP - CorsAllowedOriginList []string - HSTSDirectives []string - ExternalHost string - MaxRequestsInFlight int - MaxMutatingRequestsInFlight int - RequestTimeout time.Duration - GoawayChance float64 - LivezGracePeriod time.Duration - MinRequestTimeout int - ShutdownDelayDuration time.Duration + CorsAllowedOriginList []string + HSTSDirectives []string + ExternalHost string + MaxRequestsInFlight int + MaxMutatingRequestsInFlight int + RequestTimeout time.Duration + GoawayChance float64 + LivezGracePeriod time.Duration + MinRequestTimeout int + StorageInitializationTimeout time.Duration + ShutdownDelayDuration time.Duration // We intentionally did not add a flag for this option. Users of the // apiserver library can wire it to a flag. 
JSONPatchMaxCopyBytes int64 @@ -89,9 +92,24 @@ type ServerRunOptions struct { // This grace period is orthogonal to other grace periods, and // it is not overridden by any other grace period. ShutdownWatchTerminationGracePeriod time.Duration + + // ComponentGlobalsRegistry is the registry where the effective versions and feature gates for all components are stored. + ComponentGlobalsRegistry utilversion.ComponentGlobalsRegistry + // ComponentName is name under which the server's global variabled are registered in the ComponentGlobalsRegistry. + ComponentName string } func NewServerRunOptions() *ServerRunOptions { + if utilversion.DefaultComponentGlobalsRegistry.EffectiveVersionFor(utilversion.DefaultKubeComponent) == nil { + featureGate := utilfeature.DefaultMutableFeatureGate + effectiveVersion := utilversion.DefaultKubeEffectiveVersion() + utilruntime.Must(utilversion.DefaultComponentGlobalsRegistry.Register(utilversion.DefaultKubeComponent, effectiveVersion, featureGate)) + } + + return NewServerRunOptionsForComponent(utilversion.DefaultKubeComponent, utilversion.DefaultComponentGlobalsRegistry) +} + +func NewServerRunOptionsForComponent(componentName string, componentGlobalsRegistry utilversion.ComponentGlobalsRegistry) *ServerRunOptions { defaults := server.NewConfig(serializer.CodecFactory{}) return &ServerRunOptions{ MaxRequestsInFlight: defaults.MaxRequestsInFlight, @@ -99,16 +117,22 @@ func NewServerRunOptions() *ServerRunOptions { RequestTimeout: defaults.RequestTimeout, LivezGracePeriod: defaults.LivezGracePeriod, MinRequestTimeout: defaults.MinRequestTimeout, + StorageInitializationTimeout: defaults.StorageInitializationTimeout, ShutdownDelayDuration: defaults.ShutdownDelayDuration, ShutdownWatchTerminationGracePeriod: defaults.ShutdownWatchTerminationGracePeriod, JSONPatchMaxCopyBytes: defaults.JSONPatchMaxCopyBytes, MaxRequestBodyBytes: defaults.MaxRequestBodyBytes, ShutdownSendRetryAfter: false, + ComponentName: componentName, + ComponentGlobalsRegistry: componentGlobalsRegistry, } } // ApplyTo applies the run options to the method receiver and returns self func (s *ServerRunOptions) ApplyTo(c *server.Config) error { + if err := s.ComponentGlobalsRegistry.SetFallback(); err != nil { + return err + } c.CorsAllowedOriginList = s.CorsAllowedOriginList c.HSTSDirectives = s.HSTSDirectives c.ExternalAddress = s.ExternalHost @@ -118,12 +142,15 @@ func (s *ServerRunOptions) ApplyTo(c *server.Config) error { c.RequestTimeout = s.RequestTimeout c.GoawayChance = s.GoawayChance c.MinRequestTimeout = s.MinRequestTimeout + c.StorageInitializationTimeout = s.StorageInitializationTimeout c.ShutdownDelayDuration = s.ShutdownDelayDuration c.JSONPatchMaxCopyBytes = s.JSONPatchMaxCopyBytes c.MaxRequestBodyBytes = s.MaxRequestBodyBytes c.PublicAddress = s.AdvertiseAddress c.ShutdownSendRetryAfter = s.ShutdownSendRetryAfter c.ShutdownWatchTerminationGracePeriod = s.ShutdownWatchTerminationGracePeriod + c.EffectiveVersion = s.ComponentGlobalsRegistry.EffectiveVersionFor(s.ComponentName) + c.FeatureGate = s.ComponentGlobalsRegistry.FeatureGateFor(s.ComponentName) return nil } @@ -173,6 +200,10 @@ func (s *ServerRunOptions) Validate() []error { errors = append(errors, fmt.Errorf("--min-request-timeout can not be negative value")) } + if s.StorageInitializationTimeout < 0 { + errors = append(errors, fmt.Errorf("--storage-initialization-timeout can not be negative value")) + } + if s.ShutdownDelayDuration < 0 { errors = append(errors, fmt.Errorf("--shutdown-delay-duration can not be negative value")) 
} @@ -196,6 +227,9 @@ func (s *ServerRunOptions) Validate() []error { if err := validateCorsAllowedOriginList(s.CorsAllowedOriginList); err != nil { errors = append(errors, err) } + if errs := s.ComponentGlobalsRegistry.Validate(); len(errs) != 0 { + errors = append(errors, errs...) + } return errors } @@ -323,6 +357,9 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { "handler, which picks a randomized value above this number as the connection timeout, "+ "to spread out load.") + fs.DurationVar(&s.StorageInitializationTimeout, "storage-initialization-timeout", s.StorageInitializationTimeout, + "Maximum amount of time to wait for storage initialization before declaring apiserver ready. Defaults to 1m.") + fs.DurationVar(&s.ShutdownDelayDuration, "shutdown-delay-duration", s.ShutdownDelayDuration, ""+ "Time to delay the termination. During that time the server keeps serving requests normally. The endpoints /healthz and /livez "+ "will return success, but /readyz immediately returns failure. Graceful termination starts after this delay "+ @@ -337,5 +374,10 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { "This option, if set, represents the maximum amount of grace period the apiserver will wait "+ "for active watch request(s) to drain during the graceful server shutdown window.") - utilfeature.DefaultMutableFeatureGate.AddFlag(fs) + s.ComponentGlobalsRegistry.AddFlags(fs) +} + +// Complete fills missing fields with defaults. +func (s *ServerRunOptions) Complete() error { + return s.ComponentGlobalsRegistry.SetFallback() } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving.go b/vendor/k8s.io/apiserver/pkg/server/options/serving.go index 842ab7ee0d..21a2736e10 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving.go @@ -44,6 +44,8 @@ type SecureServingOptions struct { // BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp", // "tcp4", and "tcp6". BindNetwork string + // DisableHTTP2Serving indicates that http2 serving should not be enabled. + DisableHTTP2Serving bool // Required set to true means that BindPort cannot be zero. Required bool // ExternalAddress is the address advertised, even if BindAddress is a loopback. By default this @@ -163,6 +165,9 @@ func (s *SecureServingOptions) AddFlags(fs *pflag.FlagSet) { } fs.IntVar(&s.BindPort, "secure-port", s.BindPort, desc) + fs.BoolVar(&s.DisableHTTP2Serving, "disable-http2-serving", s.DisableHTTP2Serving, + "If true, HTTP2 serving will be disabled [default=false]") + fs.StringVar(&s.ServerCert.CertDirectory, "cert-dir", s.ServerCert.CertDirectory, ""+ "The directory where the TLS certs are located. 
"+ "If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.") @@ -256,6 +261,7 @@ func (s *SecureServingOptions) ApplyTo(config **server.SecureServingInfo) error *config = &server.SecureServingInfo{ Listener: s.Listener, HTTP2MaxStreamsPerConnection: s.HTTP2MaxStreamsPerConnection, + DisableHTTP2: s.DisableHTTP2Serving, } c := *config diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/routine.go b/vendor/k8s.io/apiserver/pkg/server/routine/routine.go similarity index 83% rename from vendor/k8s.io/apiserver/pkg/server/filters/routine.go rename to vendor/k8s.io/apiserver/pkg/server/routine/routine.go index 3f4dfa2bb2..c4b23c5903 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/routine.go +++ b/vendor/k8s.io/apiserver/pkg/server/routine/routine.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package filters +package routine import ( "context" @@ -35,6 +35,20 @@ func WithTask(parent context.Context, t *Task) context.Context { return request.WithValue(parent, taskKey, t) } +// AppendTask appends a task executed after completion of existing task. +// It is a no-op if there is no existing task. +func AppendTask(ctx context.Context, t *Task) bool { + if existTask := TaskFrom(ctx); existTask != nil && existTask.Func != nil { + existFunc := existTask.Func + existTask.Func = func() { + existFunc() + t.Func() + } + return true + } + return false +} + func TaskFrom(ctx context.Context) *Task { t, _ := ctx.Value(taskKey).(*Task) return t diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go b/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go index efb22fbc8d..d73c8e62c6 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go +++ b/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go @@ -21,6 +21,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + apimachineryversion "k8s.io/apimachinery/pkg/util/version" + "k8s.io/apiserver/pkg/util/version" ) type ResourceEncodingConfig interface { @@ -33,10 +35,15 @@ type ResourceEncodingConfig interface { InMemoryEncodingFor(schema.GroupResource) (schema.GroupVersion, error) } +type CompatibilityResourceEncodingConfig interface { + BackwardCompatibileStorageEncodingFor(schema.GroupResource, runtime.Object) (schema.GroupVersion, error) +} + type DefaultResourceEncodingConfig struct { // resources records the overriding encoding configs for individual resources. 
- resources map[schema.GroupResource]*OverridingResourceEncoding - scheme *runtime.Scheme + resources map[schema.GroupResource]*OverridingResourceEncoding + scheme *runtime.Scheme + effectiveVersion version.EffectiveVersion } type OverridingResourceEncoding struct { @@ -47,7 +54,7 @@ type OverridingResourceEncoding struct { var _ ResourceEncodingConfig = &DefaultResourceEncodingConfig{} func NewDefaultResourceEncodingConfig(scheme *runtime.Scheme) *DefaultResourceEncodingConfig { - return &DefaultResourceEncodingConfig{resources: map[schema.GroupResource]*OverridingResourceEncoding{}, scheme: scheme} + return &DefaultResourceEncodingConfig{resources: map[schema.GroupResource]*OverridingResourceEncoding{}, scheme: scheme, effectiveVersion: version.DefaultKubeEffectiveVersion()} } func (o *DefaultResourceEncodingConfig) SetResourceEncoding(resourceBeingStored schema.GroupResource, externalEncodingVersion, internalVersion schema.GroupVersion) { @@ -57,6 +64,10 @@ func (o *DefaultResourceEncodingConfig) SetResourceEncoding(resourceBeingStored } } +func (o *DefaultResourceEncodingConfig) SetEffectiveVersion(effectiveVersion version.EffectiveVersion) { + o.effectiveVersion = effectiveVersion +} + func (o *DefaultResourceEncodingConfig) StorageEncodingFor(resource schema.GroupResource) (schema.GroupVersion, error) { if !o.scheme.IsGroupRegistered(resource.Group) { return schema.GroupVersion{}, fmt.Errorf("group %q is not registered in scheme", resource.Group) @@ -71,6 +82,24 @@ func (o *DefaultResourceEncodingConfig) StorageEncodingFor(resource schema.Group return o.scheme.PrioritizedVersionsForGroup(resource.Group)[0], nil } +func (o *DefaultResourceEncodingConfig) BackwardCompatibileStorageEncodingFor(resource schema.GroupResource, example runtime.Object) (schema.GroupVersion, error) { + if !o.scheme.IsGroupRegistered(resource.Group) { + return schema.GroupVersion{}, fmt.Errorf("group %q is not registered in scheme", resource.Group) + } + + // Always respect overrides + resourceOverride, resourceExists := o.resources[resource] + if resourceExists { + return resourceOverride.ExternalResourceEncoding, nil + } + + return emulatedStorageVersion( + o.scheme.PrioritizedVersionsForGroup(resource.Group)[0], + example, + o.effectiveVersion, + o.scheme) +} + func (o *DefaultResourceEncodingConfig) InMemoryEncodingFor(resource schema.GroupResource) (schema.GroupVersion, error) { if !o.scheme.IsGroupRegistered(resource.Group) { return schema.GroupVersion{}, fmt.Errorf("group %q is not registered in scheme", resource.Group) @@ -82,3 +111,79 @@ func (o *DefaultResourceEncodingConfig) InMemoryEncodingFor(resource schema.Grou } return schema.GroupVersion{Group: resource.Group, Version: runtime.APIVersionInternal}, nil } + +// Object interface generated from "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +type introducedInterface interface { + APILifecycleIntroduced() (major, minor int) +} + +func emulatedStorageVersion(binaryVersionOfResource schema.GroupVersion, example runtime.Object, effectiveVersion version.EffectiveVersion, scheme *runtime.Scheme) (schema.GroupVersion, error) { + if example == nil || effectiveVersion == nil { + return binaryVersionOfResource, nil + } + + // Look up example in scheme to find all objects of the same Group-Kind + // Use the highest priority version for that group-kind whose lifecycle window + // includes the current emulation version. 
+ // If no version is found, use the binary version + // (in this case the API should be disabled anyway) + gvks, _, err := scheme.ObjectKinds(example) + if err != nil { + return schema.GroupVersion{}, err + } else if len(gvks) == 0 { + // Probably shouldn't happen if err is non-nil + return schema.GroupVersion{}, fmt.Errorf("object %T has no GVKs registered in scheme", example) + } + + // VersionsForGroupKind returns versions in priority order + versions := scheme.VersionsForGroupKind(schema.GroupKind{Group: gvks[0].Group, Kind: gvks[0].Kind}) + + compatibilityVersion := effectiveVersion.MinCompatibilityVersion() + + for _, gv := range versions { + if gv.Version == runtime.APIVersionInternal { + continue + } + + gvk := schema.GroupVersionKind{ + Group: gv.Group, + Version: gv.Version, + Kind: gvks[0].Kind, + } + + exampleOfGVK, err := scheme.New(gvk) + if err != nil { + return schema.GroupVersion{}, err + } + + // If it was introduced after current compatibility version, don't use it + // skip the introduced check for test when currentVersion is 0.0 to test all apis + if introduced, hasIntroduced := exampleOfGVK.(introducedInterface); hasIntroduced && (compatibilityVersion.Major() > 0 || compatibilityVersion.Minor() > 0) { + // API resource lifecycles should be relative to k8s api version + majorIntroduced, minorIntroduced := introduced.APILifecycleIntroduced() + introducedVer := apimachineryversion.MajorMinor(uint(majorIntroduced), uint(minorIntroduced)) + if introducedVer.GreaterThan(compatibilityVersion) { + continue + } + } + + // versions is returned in priority order, so just use first result + return gvk.GroupVersion(), nil + } + + // Getting here means we're serving a version that is unknown to the + // min-compatibility-version server. + // + // This is only expected to happen when serving an alpha API type due + // to missing pre-release lifecycle information + // (which doesn't happen by default), or when emulation-version and + // min-compatibility-version are several versions apart so a beta or GA API + // was being served which didn't exist at all in min-compatibility-version. + // + // In the alpha case - we do not support compatibility versioning of + // alpha types and recommend users do not mix the two. + // In the skip-level case - The version of apiserver we are retaining + // compatibility with has no knowledge of the type, + // so storing it in another type is no issue. + return binaryVersionOfResource, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go index 0dc50cea61..ad01c5a5d8 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -42,7 +42,7 @@ type Backend struct { type StorageFactory interface { // New finds the storage destination for the given group and resource. It will // return an error if the group has no storage destination configured. - NewConfig(groupResource schema.GroupResource) (*storagebackend.ConfigForResource, error) + NewConfig(groupResource schema.GroupResource, example runtime.Object) (*storagebackend.ConfigForResource, error) // ResourcePrefix returns the overridden resource prefix for the GroupResource // This allows for cohabitation of resources with different native types and provides @@ -226,7 +226,7 @@ func (s *DefaultStorageFactory) getStorageGroupResource(groupResource schema.Gro // New finds the storage destination for the given group and resource. 
It will // return an error if the group has no storage destination configured. -func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (*storagebackend.ConfigForResource, error) { +func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource, example runtime.Object) (*storagebackend.ConfigForResource, error) { chosenStorageResource := s.getStorageGroupResource(groupResource) // operate on copy @@ -244,14 +244,23 @@ func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (* } var err error - codecConfig.StorageVersion, err = s.ResourceEncodingConfig.StorageEncodingFor(chosenStorageResource) - if err != nil { - return nil, err + if backwardCompatibleInterface, ok := s.ResourceEncodingConfig.(CompatibilityResourceEncodingConfig); ok { + codecConfig.StorageVersion, err = backwardCompatibleInterface.BackwardCompatibileStorageEncodingFor(groupResource, example) + if err != nil { + return nil, err + } + } else { + codecConfig.StorageVersion, err = s.ResourceEncodingConfig.StorageEncodingFor(chosenStorageResource) + if err != nil { + return nil, err + } } + codecConfig.MemoryVersion, err = s.ResourceEncodingConfig.InMemoryEncodingFor(groupResource) if err != nil { return nil, err } + codecConfig.Config = storageConfig storageConfig.Codec, storageConfig.EncodeVersioner, err = s.newStorageCodecFn(codecConfig) diff --git a/vendor/k8s.io/apiserver/pkg/server/storage_readiness_hook.go b/vendor/k8s.io/apiserver/pkg/server/storage_readiness_hook.go new file mode 100644 index 0000000000..64fc241a6d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/storage_readiness_hook.go @@ -0,0 +1,91 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "context" + "errors" + "sync" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/klog/v2" +) + +// StorageReadinessHook implements PostStartHook functionality for checking readiness +// of underlying storage for registered resources. +type StorageReadinessHook struct { + timeout time.Duration + + lock sync.Mutex + checks map[string]func() error +} + +// NewStorageReadinessHook created new StorageReadinessHook. 
+func NewStorageReadinessHook(timeout time.Duration) *StorageReadinessHook { + return &StorageReadinessHook{ + checks: make(map[string]func() error), + timeout: timeout, + } +} + +func (h *StorageReadinessHook) RegisterStorage(gvr metav1.GroupVersionResource, storage rest.StorageWithReadiness) { + h.lock.Lock() + defer h.lock.Unlock() + + if _, ok := h.checks[gvr.String()]; !ok { + h.checks[gvr.String()] = storage.ReadinessCheck + } else { + klog.Errorf("Registering storage readiness hook for %s again: ", gvr.String()) + } +} + +func (h *StorageReadinessHook) check() bool { + h.lock.Lock() + defer h.lock.Unlock() + + failedChecks := []string{} + for gvr, check := range h.checks { + if err := check(); err != nil { + failedChecks = append(failedChecks, gvr) + } + } + if len(failedChecks) == 0 { + klog.Infof("Storage is ready for all registered resources") + return true + } + klog.V(4).Infof("Storage is not ready for: %v", failedChecks) + return false +} + +func (h *StorageReadinessHook) Hook(ctx PostStartHookContext) error { + deadlineCtx, cancel := context.WithTimeout(ctx, h.timeout) + defer cancel() + err := wait.PollUntilContextCancel(deadlineCtx, 100*time.Millisecond, true, + func(_ context.Context) (bool, error) { + if ok := h.check(); ok { + return true, nil + } + return false, nil + }) + if errors.Is(err, context.DeadlineExceeded) { + klog.Warningf("Deadline exceeded while waiting for storage readiness... ignoring") + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 1e6cc4d021..48791bd7b6 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -21,6 +21,7 @@ import ( "fmt" "net/http" "reflect" + "strings" "sync" "time" @@ -42,6 +43,7 @@ import ( "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/cacher/metrics" + etcdfeature "k8s.io/apiserver/pkg/storage/feature" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/cache" "k8s.io/component-base/tracing" @@ -51,7 +53,8 @@ import ( ) var ( - emptyFunc = func(bool) {} + emptyFunc = func(bool) {} + coreNamespaceResource = schema.GroupResource{Group: "", Resource: "namespaces"} ) const ( @@ -531,9 +534,18 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions return nil, err } - readyGeneration, err := c.ready.waitAndReadGeneration(ctx) - if err != nil { - return nil, errors.NewServiceUnavailable(err.Error()) + var readyGeneration int + if utilfeature.DefaultFeatureGate.Enabled(features.ResilientWatchCacheInitialization) { + var ok bool + readyGeneration, ok = c.ready.checkAndReadGeneration() + if !ok { + return nil, errors.NewTooManyRequests("storage is (re)initializing", 1) + } + } else { + readyGeneration, err = c.ready.waitAndReadGeneration(ctx) + if err != nil { + return nil, errors.NewServiceUnavailable(err.Error()) + } } // determine the namespace and name scope of the watch, first from the request, secondarily from the field selector @@ -549,6 +561,12 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions scope.name = selectorName } + // for request like '/api/v1/watch/namespaces/*', set scope.namespace to empty. + // namespaces don't populate metadata.namespace in ObjFields. 
+ if c.groupResource == coreNamespaceResource && len(scope.namespace) > 0 && scope.namespace == scope.name { + scope.namespace = "" + } + triggerValue, triggerSupported := "", false if c.indexedTrigger != nil { for _, field := range pred.IndexFields { @@ -568,6 +586,12 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions // watchers on our watcher having a processing hiccup chanSize := c.watchCache.suggestedWatchChannelSize(c.indexedTrigger != nil, triggerSupported) + // client-go is going to fall back to a standard LIST on any error + // returned for watch-list requests + if isListWatchRequest(opts) && !etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress) { + return newErrWatcher(fmt.Errorf("a watch stream was requested by the client but the required storage feature %s is disabled", storage.RequestWatchProgress)), nil + } + // Determine the ResourceVersion to which the watch cache must be synchronized requiredResourceVersion, err := c.getWatchCacheResourceVersion(ctx, requestedWatchRV, opts) if err != nil { @@ -591,7 +615,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions // to compute watcher.forget function (which has to happen under lock). watcher := newCacheWatcher( chanSize, - filterWithAttrsFunction(key, pred), + filterWithAttrsAndPrefixFunction(key, pred), emptyFunc, c.versioner, deadline, @@ -621,7 +645,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions defer c.watchCache.RUnlock() var cacheInterval *watchCacheInterval - cacheInterval, err = c.watchCache.getAllEventsSinceLocked(requiredResourceVersion, opts) + cacheInterval, err = c.watchCache.getAllEventsSinceLocked(requiredResourceVersion, key, opts) if err != nil { // To match the uncached watch implementation, once we have passed authn/authz/admission, // and successfully parsed a resource version, other errors must fail with a watch event of type ERROR, @@ -675,6 +699,14 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o return c.storage.Get(ctx, key, opts, objPtr) } + if utilfeature.DefaultFeatureGate.Enabled(features.ResilientWatchCacheInitialization) { + if !c.ready.check() { + // If Cache is not initialized, delegate Get requests to storage + // as described in https://kep.k8s.io/4568 + return c.storage.Get(ctx, key, opts, objPtr) + } + } + // If resourceVersion is specified, serve it from cache. // It's guaranteed that the returned value is at least that // fresh as the given resourceVersion. @@ -683,16 +715,18 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o return err } - if getRV == 0 && !c.ready.check() { - // If Cacher is not yet initialized and we don't require any specific - // minimal resource version, simply forward the request to storage. - return c.storage.Get(ctx, key, opts, objPtr) - } - // Do not create a trace - it's not for free and there are tons // of Get requests. We can add it if it will be really needed. - if err := c.ready.wait(ctx); err != nil { - return errors.NewServiceUnavailable(err.Error()) + + if !utilfeature.DefaultFeatureGate.Enabled(features.ResilientWatchCacheInitialization) { + if getRV == 0 && !c.ready.check() { + // If Cacher is not yet initialized and we don't require any specific + // minimal resource version, simply forward the request to storage. 
+ return c.storage.Get(ctx, key, opts, objPtr) + } + if err := c.ready.wait(ctx); err != nil { + return errors.NewServiceUnavailable(err.Error()) + } } objVal, err := conversion.EnforcePtr(objPtr) @@ -728,17 +762,40 @@ func shouldDelegateList(opts storage.ListOptions) bool { pred := opts.Predicate match := opts.ResourceVersionMatch consistentListFromCacheEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) + requestWatchProgressSupported := etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress) // Serve consistent reads from storage if ConsistentListFromCache is disabled - consistentReadFromStorage := resourceVersion == "" && !consistentListFromCacheEnabled + consistentReadFromStorage := resourceVersion == "" && !(consistentListFromCacheEnabled && requestWatchProgressSupported) // Watch cache doesn't support continuations, so serve them from etcd. hasContinuation := len(pred.Continue) > 0 - // Serve paginated requests about revision "0" from watch cache to avoid overwhelming etcd. - hasLimit := pred.Limit > 0 && resourceVersion != "0" // Watch cache only supports ResourceVersionMatchNotOlderThan (default). - unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan + // see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-get-and-list + isLegacyExactMatch := opts.Predicate.Limit > 0 && match == "" && len(resourceVersion) > 0 && resourceVersion != "0" + unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan || isLegacyExactMatch - return consistentReadFromStorage || hasContinuation || hasLimit || unsupportedMatch + return consistentReadFromStorage || hasContinuation || unsupportedMatch +} + +// computeListLimit determines whether the cacher should +// apply a limit to an incoming LIST request and returns its value. +// +// note that this function doesn't check RVM nor the Continuation token. +// these parameters are validated by the shouldDelegateList function. +// +// as of today, the limit is ignored for requests that set RV == 0 +func computeListLimit(opts storage.ListOptions) int64 { + if opts.Predicate.Limit <= 0 || opts.ResourceVersion == "0" { + return 0 + } + return opts.Predicate.Limit +} + +func shouldDelegateListOnNotReadyCache(opts storage.ListOptions) bool { + pred := opts.Predicate + noLabelSelector := pred.Label == nil || pred.Label.Empty() + noFieldSelector := pred.Field == nil || pred.Field.Empty() + hasLimit := pred.Limit > 0 + return noLabelSelector && noFieldSelector && hasLimit } func (c *Cacher) listItems(ctx context.Context, listRV uint64, key string, pred storage.SelectionPredicate, recursive bool) ([]interface{}, uint64, string, error) { @@ -752,7 +809,7 @@ func (c *Cacher) listItems(ctx context.Context, listRV uint64, key string, pred } return nil, readResourceVersion, "", nil } - return c.watchCache.WaitUntilFreshAndList(ctx, listRV, pred.MatcherIndex(ctx)) + return c.watchCache.WaitUntilFreshAndList(ctx, listRV, key, pred.MatcherIndex(ctx)) } // GetList implements storage.Interface @@ -768,12 +825,31 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio if err != nil { return err } - if listRV == 0 && !c.ready.check() { - // If Cacher is not yet initialized and we don't require any specific - // minimal resource version, simply forward the request to storage. 
- return c.storage.GetList(ctx, key, opts, listObj) + + if utilfeature.DefaultFeatureGate.Enabled(features.ResilientWatchCacheInitialization) { + if !c.ready.check() && shouldDelegateListOnNotReadyCache(opts) { + // If Cacher is not initialized, delegate List requests to storage + // as described in https://kep.k8s.io/4568 + return c.storage.GetList(ctx, key, opts, listObj) + } + } else { + if listRV == 0 && !c.ready.check() { + // If Cacher is not yet initialized and we don't require any specific + // minimal resource version, simply forward the request to storage. + return c.storage.GetList(ctx, key, opts, listObj) + } } - if resourceVersion == "" && utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) { + // For recursive lists, we need to make sure the key ended with "/" so that we only + // get children "directories". e.g. if we have key "/a", "/a/b", "/ab", getting keys + // with prefix "/a" will return all three, while with prefix "/a/" will return only + // "/a/b" which is the correct answer. + preparedKey := key + if opts.Recursive && !strings.HasSuffix(key, "/") { + preparedKey += "/" + } + requestWatchProgressSupported := etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress) + consistentRead := resourceVersion == "" && utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && requestWatchProgressSupported + if consistentRead { listRV, err = storage.GetCurrentResourceVersionFromStorage(ctx, c.storage, c.newListFunc, c.resourcePrefix, c.objectType.String()) if err != nil { return err @@ -785,8 +861,16 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio attribute.Stringer("type", c.groupResource)) defer span.End(500 * time.Millisecond) - if err := c.ready.wait(ctx); err != nil { - return errors.NewServiceUnavailable(err.Error()) + if utilfeature.DefaultFeatureGate.Enabled(features.ResilientWatchCacheInitialization) { + if !c.ready.check() { + // If Cacher is not initialized, reject List requests + // as described in https://kep.k8s.io/4568 + return errors.NewTooManyRequests("storage is (re)initializing", 1) + } + } else { + if err := c.ready.wait(ctx); err != nil { + return errors.NewServiceUnavailable(err.Error()) + } } span.AddEvent("Ready") @@ -802,25 +886,47 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio if listVal.Kind() != reflect.Slice { return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) } - filter := filterWithAttrsFunction(key, pred) - objs, readResourceVersion, indexUsed, err := c.listItems(ctx, listRV, key, pred, recursive) + objs, readResourceVersion, indexUsed, err := c.listItems(ctx, listRV, preparedKey, pred, recursive) + success := "true" + fallback := "false" if err != nil { + if consistentRead { + if storage.IsTooLargeResourceVersion(err) { + fallback = "true" + err = c.storage.GetList(ctx, key, opts, listObj) + } + if err != nil { + success = "false" + } + metrics.ConsistentReadTotal.WithLabelValues(c.resourcePrefix, success, fallback).Add(1) + } return err } + if consistentRead { + metrics.ConsistentReadTotal.WithLabelValues(c.resourcePrefix, success, fallback).Add(1) + } span.AddEvent("Listed items from cache", attribute.Int("count", len(objs))) // store pointer of eligible objects, // Why not directly put object in the items of listObj? // the elements in ListObject are Struct type, making slice will bring excessive memory consumption. 
// so we try to delay this action as much as possible var selectedObjects []runtime.Object - for _, obj := range objs { + var lastSelectedObjectKey string + var hasMoreListItems bool + limit := computeListLimit(opts) + for i, obj := range objs { elem, ok := obj.(*storeElement) if !ok { return fmt.Errorf("non *storeElement returned from storage: %v", obj) } - if filter(elem.Key, elem.Labels, elem.Fields) { + if pred.MatchesObjectAttributes(elem.Labels, elem.Fields) { selectedObjects = append(selectedObjects, elem.Object) + lastSelectedObjectKey = elem.Key + } + if limit > 0 && int64(len(selectedObjects)) >= limit { + hasMoreListItems = i < len(objs)-1 + break } } if len(selectedObjects) == 0 { @@ -836,7 +942,12 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio } span.AddEvent("Filtered items", attribute.Int("count", listVal.Len())) if c.versioner != nil { - if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil { + continueValue, remainingItemCount, err := storage.PrepareContinueToken(lastSelectedObjectKey, key, int64(readResourceVersion), int64(len(objs)), hasMoreListItems, opts) + if err != nil { + return err + } + + if err = c.versioner.UpdateList(listObj, readResourceVersion, continueValue, remainingItemCount); err != nil { return err } } @@ -867,6 +978,14 @@ func (c *Cacher) Count(pathPrefix string) (int64, error) { return c.storage.Count(pathPrefix) } +// ReadinessCheck implements storage.Interface. +func (c *Cacher) ReadinessCheck() error { + if !c.ready.check() { + return storage.ErrStorageNotReady + } + return nil +} + // baseObjectThreadUnsafe omits locking for cachingObject. func baseObjectThreadUnsafe(object runtime.Object) runtime.Object { if co, ok := object.(*cachingObject); ok { @@ -905,7 +1024,23 @@ func (c *Cacher) dispatchEvents() { bookmarkTimer := c.clock.NewTimer(wait.Jitter(time.Second, 0.25)) defer bookmarkTimer.Stop() + // The internal informer populates the RV as soon as it conducts + // The first successful sync with the underlying store. + // The cache must wait until this first sync is completed to be deemed ready. + // Since we cannot send a bookmark when the lastProcessedResourceVersion is 0, + // we poll aggressively for the first list RV before entering the dispatch loop. lastProcessedResourceVersion := uint64(0) + if err := wait.PollUntilContextCancel(wait.ContextForChannel(c.stopCh), 10*time.Millisecond, true, func(_ context.Context) (bool, error) { + if rv := c.watchCache.getListResourceVersion(); rv != 0 { + lastProcessedResourceVersion = rv + return true, nil + } + return false, nil + }); err != nil { + // given the function above never returns error, + // the non-empty error means that the stopCh was closed + return + } for { select { case event, ok := <-c.incoming: @@ -929,29 +1064,6 @@ func (c *Cacher) dispatchEvents() { metrics.EventsCounter.WithLabelValues(c.groupResource.String()).Inc() case <-bookmarkTimer.C(): bookmarkTimer.Reset(wait.Jitter(time.Second, 0.25)) - // Never send a bookmark event if we did not see an event here, this is fine - // because we don't provide any guarantees on sending bookmarks. - // - // Just pop closed watchers and requeue others if needed. 
- // - // TODO(#115478): rework the following logic - // in a way that would allow more - // efficient cleanup of closed watchers - if lastProcessedResourceVersion == 0 { - func() { - c.Lock() - defer c.Unlock() - for _, watchers := range c.bookmarkWatchers.popExpiredWatchersThreadUnsafe() { - for _, watcher := range watchers { - if watcher.stopped { - continue - } - c.bookmarkWatchers.addWatcherThreadUnsafe(watcher) - } - } - }() - continue - } bookmarkEvent := &watchCacheEvent{ Type: watch.Bookmark, Object: c.newFunc(), @@ -1231,7 +1343,7 @@ func forgetWatcher(c *Cacher, w *cacheWatcher, index int, scope namespacedName, } } -func filterWithAttrsFunction(key string, p storage.SelectionPredicate) filterWithAttrsFunc { +func filterWithAttrsAndPrefixFunction(key string, p storage.SelectionPredicate) filterWithAttrsFunc { filterFunc := func(objKey string, label labels.Set, field fields.Set) bool { if !hasPathPrefix(objKey, key) { return false @@ -1256,7 +1368,7 @@ func (c *Cacher) LastSyncResourceVersion() (uint64, error) { // // The returned function must be called under the watchCache lock. func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(parsedResourceVersion, requiredResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) { - if opts.SendInitialEvents == nil || !*opts.SendInitialEvents || !opts.Predicate.AllowWatchBookmarks { + if !isListWatchRequest(opts) { return func() uint64 { return 0 }, nil } @@ -1271,6 +1383,10 @@ func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(parsedResourceVersion } } +func isListWatchRequest(opts storage.ListOptions) bool { + return opts.SendInitialEvents != nil && *opts.SendInitialEvents && opts.Predicate.AllowWatchBookmarks +} + // getWatchCacheResourceVersion returns a ResourceVersion to which the watch cache must be synchronized to // // Depending on the input parameters, the semantics of the returned ResourceVersion are: @@ -1300,9 +1416,14 @@ func (c *Cacher) getWatchCacheResourceVersion(ctx context.Context, parsedWatchRe // can wait for events without unnecessary blocking. func (c *Cacher) waitUntilWatchCacheFreshAndForceAllEvents(ctx context.Context, requestedWatchRV uint64, opts storage.ListOptions) error { if opts.SendInitialEvents != nil && *opts.SendInitialEvents { - // TODO(p0lyn0mial): adapt the following logic once - // https://github.com/kubernetes/kubernetes/pull/123264 merges - if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && c.watchCache.notFresh(requestedWatchRV) { + // Here be dragons: + // Since the etcd feature checker needs to check all members + // to determine whether a given feature is supported, + // we may receive a positive response even if the feature is not supported. + // + // In this very rare scenario, the worst case will be that this + // request will wait for 3 seconds before it fails. + if etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress) && c.watchCache.notFresh(requestedWatchRV) { c.watchCache.waitingUntilFresh.Add() defer c.watchCache.waitingUntilFresh.Remove() } @@ -1313,6 +1434,11 @@ func (c *Cacher) waitUntilWatchCacheFreshAndForceAllEvents(ctx context.Context, return nil } +// Wait blocks until the cacher is Ready or Stopped, it returns an error if Stopped. 
+func (c *Cacher) Wait(ctx context.Context) error { + return c.ready.wait(ctx) +} + // errWatcher implements watch.Interface to return a single error type errWatcher struct { result chan watch.Event diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics/metrics.go index 6de0ad1eec..dd77febb93 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics/metrics.go @@ -106,6 +106,17 @@ var ( []string{"resource"}, ) + watchCacheResourceVersion = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "resource_version", + Help: "Current resource version of watch cache broken by resource type.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource"}, + ) + watchCacheCapacityIncreaseTotal = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ Subsystem: subsystem, @@ -156,6 +167,15 @@ var ( StabilityLevel: compbasemetrics.ALPHA, Buckets: []float64{0.005, 0.025, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 1.25, 1.5, 2, 3}, }, []string{"resource"}) + + ConsistentReadTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "consistent_read_total", + Help: "Counter for consistent reads from cache.", + StabilityLevel: compbasemetrics.ALPHA, + }, []string{"resource", "success", "fallback"}) ) var registerMetrics sync.Once @@ -171,11 +191,13 @@ func Register() { legacyregistry.MustRegister(EventsReceivedCounter) legacyregistry.MustRegister(EventsCounter) legacyregistry.MustRegister(TerminatedWatchersCounter) + legacyregistry.MustRegister(watchCacheResourceVersion) legacyregistry.MustRegister(watchCacheCapacityIncreaseTotal) legacyregistry.MustRegister(watchCacheCapacityDecreaseTotal) legacyregistry.MustRegister(WatchCacheCapacity) legacyregistry.MustRegister(WatchCacheInitializations) legacyregistry.MustRegister(WatchCacheReadWait) + legacyregistry.MustRegister(ConsistentReadTotal) }) } @@ -186,6 +208,11 @@ func RecordListCacheMetrics(resourcePrefix, indexName string, numFetched, numRet listCacheNumReturned.WithLabelValues(resourcePrefix).Add(float64(numReturned)) } +// RecordResourceVersion sets the current resource version for a given resource type. +func RecordResourceVersion(resourcePrefix string, resourceVersion uint64) { + watchCacheResourceVersion.WithLabelValues(resourcePrefix).Set(float64(resourceVersion)) +} + // RecordsWatchCacheCapacityChange record watchCache capacity resize(increase or decrease) operations. 
func RecordsWatchCacheCapacityChange(objType string, old, new int) { WatchCacheCapacity.WithLabelValues(objType).Set(float64(new)) diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go index 9c4d93f176..34a48ac637 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -33,6 +33,7 @@ import ( "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/cacher/metrics" + etcdfeature "k8s.io/apiserver/pkg/storage/feature" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/cache" "k8s.io/component-base/tracing" @@ -311,25 +312,26 @@ func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, upd RecordTime: w.clock.Now(), } + // We can call w.store.Get() outside of a critical section, + // because the w.store itself is thread-safe and the only + // place where w.store is modified is below (via updateFunc) + // and these calls are serialized because reflector is processing + // events one-by-one. + previous, exists, err := w.store.Get(elem) + if err != nil { + return err + } + if exists { + previousElem := previous.(*storeElement) + wcEvent.PrevObject = previousElem.Object + wcEvent.PrevObjLabels = previousElem.Labels + wcEvent.PrevObjFields = previousElem.Fields + } + if err := func() error { - // TODO: We should consider moving this lock below after the watchCacheEvent - // is created. In such situation, the only problematic scenario is Replace() - // happening after getting object from store and before acquiring a lock. - // Maybe introduce another lock for this purpose. w.Lock() defer w.Unlock() - previous, exists, err := w.store.Get(elem) - if err != nil { - return err - } - if exists { - previousElem := previous.(*storeElement) - wcEvent.PrevObject = previousElem.Object - wcEvent.PrevObjLabels = previousElem.Labels - wcEvent.PrevObjFields = previousElem.Fields - } - w.updateCache(wcEvent) w.resourceVersion = resourceVersion defer w.cond.Broadcast() @@ -346,6 +348,7 @@ func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, upd if w.eventHandler != nil { w.eventHandler(wcEvent) } + metrics.RecordResourceVersion(w.groupResource.String(), resourceVersion) return nil } @@ -428,6 +431,7 @@ func (w *watchCache) UpdateResourceVersion(resourceVersion string) { } w.eventHandler(wcEvent) } + metrics.RecordResourceVersion(w.groupResource.String(), rv) } // List returns list of pointers to objects. @@ -497,8 +501,31 @@ func (s sortableStoreElements) Swap(i, j int) { // WaitUntilFreshAndList returns list of pointers to `storeElement` objects along // with their ResourceVersion and the name of the index, if any, that was used. 
-func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) { - if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && w.notFresh(resourceVersion) { +func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, key string, matchValues []storage.MatchValue) ([]interface{}, uint64, string, error) { + items, rv, index, err := w.waitUntilFreshAndListItems(ctx, resourceVersion, key, matchValues) + if err != nil { + return nil, 0, "", err + } + + var result []interface{} + for _, item := range items { + elem, ok := item.(*storeElement) + if !ok { + return nil, 0, "", fmt.Errorf("non *storeElement returned from storage: %v", item) + } + if !hasPathPrefix(elem.Key, key) { + continue + } + result = append(result, item) + } + + sort.Sort(sortableStoreElements(result)) + return result, rv, index, nil +} + +func (w *watchCache) waitUntilFreshAndListItems(ctx context.Context, resourceVersion uint64, key string, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) { + requestWatchProgressSupported := etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress) + if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && requestWatchProgressSupported && w.notFresh(resourceVersion) { w.waitingUntilFresh.Add() err = w.waitUntilFreshAndBlock(ctx, resourceVersion) w.waitingUntilFresh.Remove() @@ -506,7 +533,6 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion err = w.waitUntilFreshAndBlock(ctx, resourceVersion) } - defer func() { sort.Sort(sortableStoreElements(result)) }() defer w.RUnlock() if err != nil { return result, rv, index, err @@ -626,7 +652,9 @@ func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error { w.onReplace() } w.cond.Broadcast() - klog.V(3).Infof("Replace watchCache (rev: %v) ", resourceVersion) + + metrics.RecordResourceVersion(w.groupResource.String(), version) + klog.V(3).Infof("Replaced watchCache (rev: %v) ", resourceVersion) return nil } @@ -641,6 +669,12 @@ func (w *watchCache) Resync() error { return nil } +func (w *watchCache) getListResourceVersion() uint64 { + w.RLock() + defer w.RUnlock() + return w.listResourceVersion +} + func (w *watchCache) currentCapacity() int { w.RLock() defer w.RUnlock() @@ -703,9 +737,10 @@ func (w *watchCache) isIndexValidLocked(index int) bool { // getAllEventsSinceLocked returns a watchCacheInterval that can be used to // retrieve events since a certain resourceVersion. This function assumes to // be called under the watchCache lock. -func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, opts storage.ListOptions) (*watchCacheInterval, error) { +func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, key string, opts storage.ListOptions) (*watchCacheInterval, error) { + _, matchesSingle := opts.Predicate.MatchesSingle() if opts.SendInitialEvents != nil && *opts.SendInitialEvents { - return w.getIntervalFromStoreLocked() + return w.getIntervalFromStoreLocked(key, matchesSingle) } size := w.endIndex - w.startIndex @@ -734,7 +769,7 @@ func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, opts storag // current state and only then start watching from that point. // // TODO: In v2 api, we should stop returning the current state - #13969. 
- return w.getIntervalFromStoreLocked() + return w.getIntervalFromStoreLocked(key, matchesSingle) } // SendInitialEvents = false and resourceVersion = 0 // means that the request would like to start watching @@ -753,15 +788,15 @@ func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, opts storag indexerFunc := func(i int) *watchCacheEvent { return w.cache[i%w.capacity] } - ci := newCacheInterval(w.startIndex+first, w.endIndex, indexerFunc, w.indexValidator, &w.RWMutex) + ci := newCacheInterval(w.startIndex+first, w.endIndex, indexerFunc, w.indexValidator, w.RWMutex.RLocker()) return ci, nil } // getIntervalFromStoreLocked returns a watchCacheInterval // that covers the entire storage state. // This function assumes to be called under the watchCache lock. -func (w *watchCache) getIntervalFromStoreLocked() (*watchCacheInterval, error) { - ci, err := newCacheIntervalFromStore(w.resourceVersion, w.store, w.getAttrsFunc) +func (w *watchCache) getIntervalFromStoreLocked(key string, matchesSingle bool) (*watchCacheInterval, error) { + ci, err := newCacheIntervalFromStore(w.resourceVersion, w.store, w.getAttrsFunc, key, matchesSingle) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go index 2b57dd1650..fa7d389468 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go @@ -133,9 +133,22 @@ func (s sortableWatchCacheEvents) Swap(i, j int) { // returned by Next() need to be events from a List() done on the underlying store of // the watch cache. // The items returned in the interval will be sorted by Key. -func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getAttrsFunc attrFunc) (*watchCacheInterval, error) { +func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getAttrsFunc attrFunc, key string, matchesSingle bool) (*watchCacheInterval, error) { buffer := &watchCacheIntervalBuffer{} - allItems := store.List() + var allItems []interface{} + + if matchesSingle { + item, exists, err := store.GetByKey(key) + if err != nil { + return nil, err + } + + if exists { + allItems = append(allItems, item) + } + } else { + allItems = store.List() + } buffer.buffer = make([]*watchCacheEvent, len(allItems)) for i, item := range allItems { elem, ok := item.(*storeElement) diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go index 13f50bc187..087fb14e54 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go @@ -42,14 +42,14 @@ func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester requestWatchProgress: requestWatchProgress, contextMetadata: contextMetadata, } - pr.cond = sync.NewCond(pr.mux.RLocker()) + pr.cond = sync.NewCond(&pr.mux) return pr } type WatchProgressRequester func(ctx context.Context) error type TickerFactory interface { - NewTicker(time.Duration) clock.Ticker + NewTimer(time.Duration) clock.Timer } // conditionalProgressRequester will request progress notification if there @@ -59,7 +59,7 @@ type conditionalProgressRequester struct { requestWatchProgress WatchProgressRequester contextMetadata metadata.MD - mux sync.RWMutex + mux sync.Mutex cond *sync.Cond waiting int stopped bool @@ -78,12 +78,12 @@ func (pr 
*conditionalProgressRequester) Run(stopCh <-chan struct{}) { pr.stopped = true pr.cond.Signal() }() - ticker := pr.clock.NewTicker(progressRequestPeriod) - defer ticker.Stop() + timer := pr.clock.NewTimer(progressRequestPeriod) + defer timer.Stop() for { stopped := func() bool { - pr.mux.RLock() - defer pr.mux.RUnlock() + pr.mux.Lock() + defer pr.mux.Unlock() for pr.waiting == 0 && !pr.stopped { pr.cond.Wait() } @@ -94,15 +94,17 @@ func (pr *conditionalProgressRequester) Run(stopCh <-chan struct{}) { } select { - case <-ticker.C(): + case <-timer.C(): shouldRequest := func() bool { - pr.mux.RLock() - defer pr.mux.RUnlock() + pr.mux.Lock() + defer pr.mux.Unlock() return pr.waiting > 0 && !pr.stopped }() if !shouldRequest { + timer.Reset(0) continue } + timer.Reset(progressRequestPeriod) err := pr.requestWatchProgress(ctx) if err != nil { klog.V(4).InfoS("Error requesting bookmark", "err", err) @@ -124,5 +126,4 @@ func (pr *conditionalProgressRequester) Remove() { pr.mux.Lock() defer pr.mux.Unlock() pr.waiting -= 1 - pr.cond.Signal() } diff --git a/vendor/k8s.io/apiserver/pkg/storage/continue.go b/vendor/k8s.io/apiserver/pkg/storage/continue.go index fcd95af9b3..1d7976690a 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/continue.go +++ b/vendor/k8s.io/apiserver/pkg/storage/continue.go @@ -91,3 +91,30 @@ func EncodeContinue(key, keyPrefix string, resourceVersion int64) (string, error } return base64.RawURLEncoding.EncodeToString(out), nil } + +// PrepareContinueToken prepares optional +// parameters for retrieving additional results for a paginated request. +// +// This function sets up parameters that a client can use to fetch the remaining results +// from the server if they are available. +func PrepareContinueToken(keyLastItem, keyPrefix string, resourceVersion int64, itemsCount int64, hasMoreItems bool, opts ListOptions) (string, *int64, error) { + var remainingItemCount *int64 + var continueValue string + var err error + + if hasMoreItems { + // Instruct the client to begin querying from immediately after the last key. + continueValue, err = EncodeContinue(keyLastItem+"\x00", keyPrefix, resourceVersion) + if err != nil { + return "", remainingItemCount, err + } + // Etcd response counts in objects that do not match the pred. + // Instead of returning inaccurate count for non-empty selectors, we return nil. + // We only set remainingItemCount if the predicate is empty. 
+ if opts.Predicate.Empty() { + remainingItems := itemsCount - opts.Predicate.Limit + remainingItemCount = &remainingItems + } + } + return continueValue, remainingItemCount, err +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors.go b/vendor/k8s.io/apiserver/pkg/storage/errors.go index e7e0957489..cc2c1c974d 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/errors.go +++ b/vendor/k8s.io/apiserver/pkg/storage/errors.go @@ -25,7 +25,10 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" ) -var ErrResourceVersionSetOnCreate = errors.New("resourceVersion should not be set on objects to be created") +var ( + ErrResourceVersionSetOnCreate = errors.New("resourceVersion should not be set on objects to be created") + ErrStorageNotReady = errors.New("storage not ready") +) const ( ErrCodeKeyNotFound int = iota + 1 diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS b/vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS index 6b7e2928ea..33809ed8c8 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS @@ -2,3 +2,6 @@ reviewers: - wojtek-t + - serathius +labels: + - sig/etcd diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index ed6ebe4669..3eb967188b 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -39,9 +39,12 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/audit" endpointsrequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/etcd3/metrics" + etcdfeature "k8s.io/apiserver/pkg/storage/feature" "k8s.io/apiserver/pkg/storage/value" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/tracing" "k8s.io/klog/v2" ) @@ -139,6 +142,9 @@ func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func w.getCurrentStorageRV = func(ctx context.Context) (uint64, error) { return storage.GetCurrentResourceVersionFromStorage(ctx, s, newListFunc, resourcePrefix, w.objectType) } + if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) || utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { + etcdfeature.DefaultFeatureSupportChecker.CheckClient(c.Ctx(), c, storage.RequestWatchProgress) + } return s } @@ -585,6 +591,11 @@ func (s *store) Count(key string) (int64, error) { return getResp.Count, nil } +// ReadinessCheck implements storage.Interface. +func (s *store) ReadinessCheck() error { + return nil +} + // resolveGetListRev is used by GetList to resolve the rev to use in the client.KV.Get request. func (s *store) resolveGetListRev(continueKey string, continueRV int64, opts storage.ListOptions) (int64, error) { var withRev int64 @@ -805,27 +816,11 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption v.Set(reflect.MakeSlice(v.Type(), 0, 0)) } - // instruct the client to begin querying from immediately after the last key we returned - // we never return a key that the client wouldn't be allowed to see - if hasMore { - // we want to start immediately after the last key - next, err := storage.EncodeContinue(string(lastKey)+"\x00", keyPrefix, withRev) - if err != nil { - return err - } - var remainingItemCount *int64 - // getResp.Count counts in objects that do not match the pred. - // Instead of returning inaccurate count for non-empty selectors, we return nil. 
- // Only set remainingItemCount if the predicate is empty. - if opts.Predicate.Empty() { - c := int64(getResp.Count - opts.Predicate.Limit) - remainingItemCount = &c - } - return s.versioner.UpdateList(listObj, uint64(withRev), next, remainingItemCount) + continueValue, remainingItemCount, err := storage.PrepareContinueToken(string(lastKey), keyPrefix, withRev, getResp.Count, hasMore, opts) + if err != nil { + return err } - - // no continuation - return s.versioner.UpdateList(listObj, uint64(withRev), "", nil) + return s.versioner.UpdateList(listObj, uint64(withRev), continueValue, remainingItemCount) } // growSlice takes a slice value and grows its capacity up diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index 85acf44f86..536f2e1c08 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -46,8 +46,9 @@ import ( const ( // We have set a buffer in order to reduce times of context switches. - incomingBufSize = 100 - outgoingBufSize = 100 + incomingBufSize = 100 + outgoingBufSize = 100 + processEventConcurrency = 10 ) // defaultWatcherMaxLimit is used to facilitate construction tests @@ -230,8 +231,7 @@ func (wc *watchChan) run(initialEventsEndBookmarkRequired, forceInitialEvents bo go wc.startWatching(watchClosedCh, initialEventsEndBookmarkRequired, forceInitialEvents) var resultChanWG sync.WaitGroup - resultChanWG.Add(1) - go wc.processEvent(&resultChanWG) + wc.processEvents(&resultChanWG) select { case err := <-wc.errChan: @@ -424,10 +424,17 @@ func (wc *watchChan) startWatching(watchClosedCh chan struct{}, initialEventsEnd close(watchClosedCh) } -// processEvent processes events from etcd watcher and sends results to resultChan. -func (wc *watchChan) processEvent(wg *sync.WaitGroup) { +// processEvents processes events from etcd watcher and sends results to resultChan. +func (wc *watchChan) processEvents(wg *sync.WaitGroup) { + if utilfeature.DefaultFeatureGate.Enabled(features.ConcurrentWatchObjectDecode) { + wc.concurrentProcessEvents(wg) + } else { + wg.Add(1) + go wc.serialProcessEvents(wg) + } +} +func (wc *watchChan) serialProcessEvents(wg *sync.WaitGroup) { defer wg.Done() - for { select { case e := <-wc.incomingEventChan: @@ -435,7 +442,7 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) { if res == nil { continue } - if len(wc.resultChan) == outgoingBufSize { + if len(wc.resultChan) == cap(wc.resultChan) { klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize, "objectType", wc.watcher.objectType, "groupResource", wc.watcher.groupResource) } // If user couldn't receive results fast enough, we also block incoming events from watcher. 
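[Editorial aside, not part of the patch.] The next hunk adds concurrentOrderedEventProcessing, which decodes watch events on multiple goroutines (bounded by processEventConcurrency) while still delivering them in arrival order when the ConcurrentWatchObjectDecode feature gate is enabled. The snippet below is a minimal, standalone sketch of that ordering technique only; every name in it is invented for illustration and nothing beyond the Go standard library is assumed.

package main

import (
	"fmt"
	"strings"
	"sync"
)

// orderedMap applies transform to each input on its own goroutine, but returns
// the results in input order: every item gets a one-slot result channel, the
// channels are queued in input order, and the collector drains that queue
// sequentially.
func orderedMap(inputs []string, transform func(string) string, concurrency int) []string {
	queue := make(chan chan string, concurrency-1)
	var wg sync.WaitGroup

	go func() {
		for _, in := range inputs {
			result := make(chan string, 1)
			queue <- result // reserve this item's position in the output order
			wg.Add(1)
			go func(in string, result chan<- string) {
				defer wg.Done()
				result <- transform(in) // may complete out of order
			}(in, result)
		}
		wg.Wait()
		close(queue)
	}()

	var out []string
	for result := range queue { // drained strictly in enqueue (input) order
		out = append(out, <-result)
	}
	return out
}

func main() {
	fmt.Println(orderedMap([]string{"watch", "cache", "events"}, strings.ToUpper, 3))
	// prints [WATCH CACHE EVENTS]
}

The buffered queue of per-item result channels is what both bounds the concurrency and preserves ordering; the real implementation in the hunk below additionally checks ctx.Done() around every send and receive so a cancelled watch tears the pipeline down promptly.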
@@ -452,6 +459,95 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) { } } +func (wc *watchChan) concurrentProcessEvents(wg *sync.WaitGroup) { + p := concurrentOrderedEventProcessing{ + input: wc.incomingEventChan, + processFunc: wc.transform, + output: wc.resultChan, + processingQueue: make(chan chan *watch.Event, processEventConcurrency-1), + + objectType: wc.watcher.objectType, + groupResource: wc.watcher.groupResource, + } + wg.Add(1) + go func() { + defer wg.Done() + p.scheduleEventProcessing(wc.ctx, wg) + }() + wg.Add(1) + go func() { + defer wg.Done() + p.collectEventProcessing(wc.ctx) + }() +} + +type concurrentOrderedEventProcessing struct { + input chan *event + processFunc func(*event) *watch.Event + output chan watch.Event + + processingQueue chan chan *watch.Event + // Metadata for logging + objectType string + groupResource schema.GroupResource +} + +func (p *concurrentOrderedEventProcessing) scheduleEventProcessing(ctx context.Context, wg *sync.WaitGroup) { + var e *event + for { + select { + case <-ctx.Done(): + return + case e = <-p.input: + } + processingResponse := make(chan *watch.Event, 1) + select { + case <-ctx.Done(): + return + case p.processingQueue <- processingResponse: + } + wg.Add(1) + go func(e *event, response chan<- *watch.Event) { + defer wg.Done() + select { + case <-ctx.Done(): + case response <- p.processFunc(e): + } + }(e, processingResponse) + } +} + +func (p *concurrentOrderedEventProcessing) collectEventProcessing(ctx context.Context) { + var processingResponse chan *watch.Event + var e *watch.Event + for { + select { + case <-ctx.Done(): + return + case processingResponse = <-p.processingQueue: + } + select { + case <-ctx.Done(): + return + case e = <-processingResponse: + } + if e == nil { + continue + } + if len(p.output) == cap(p.output) { + klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize, "objectType", p.objectType, "groupResource", p.groupResource) + } + // If user couldn't receive results fast enough, we also block incoming events from watcher. + // Because storing events in local will cause more memory usage. + // The worst case would be closing the fast watcher. + select { + case <-ctx.Done(): + return + case p.output <- *e: + } + } +} + func (wc *watchChan) filter(obj runtime.Object) bool { if wc.internalPred.Empty() { return true diff --git a/vendor/k8s.io/apiserver/pkg/storage/feature/feature_support_checker.go b/vendor/k8s.io/apiserver/pkg/storage/feature/feature_support_checker.go new file mode 100644 index 0000000000..77f5ab05c9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/feature/feature_support_checker.go @@ -0,0 +1,172 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package feature + +import ( + "context" + "fmt" + "sync" + "time" + + clientv3 "go.etcd.io/etcd/client/v3" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/version" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/storage" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" +) + +var ( + // Define these static versions to use for checking the version of etcd; see Kubernetes issue #123192. + v3_4_31 = version.MustParseSemantic("3.4.31") + v3_5_0 = version.MustParseSemantic("3.5.0") + v3_5_13 = version.MustParseSemantic("3.5.13") + + // DefaultFeatureSupportChecker is a shared global etcd FeatureSupportChecker. + DefaultFeatureSupportChecker FeatureSupportChecker = newDefaultFeatureSupportChecker() +) + +// FeatureSupportChecker defines the Supports functions. +type FeatureSupportChecker interface { + // Supports checks if the feature is supported or not by checking the internal cache. + // By default, all calls to this function before calling CheckClient return false. + // Returns true if all endpoints in the etcd clients support the feature. + // If client A supports the feature and client B doesn't, `Supports` will + // first return true at client A initialization and then return false on client B + // initialization, i.e. it can flip the support at runtime. + Supports(feature storage.Feature) bool + // CheckClient works with the etcd client to recalculate feature support and cache it internally. + // All etcd clients must support the feature for `Supports` to return true. + // If client A supports the feature and client B doesn't, `Supports` will + // first return true at client A initialization and then return false on client B + // initialization, i.e. it can flip the support at runtime. + CheckClient(ctx context.Context, c client, feature storage.Feature) +} + +type defaultFeatureSupportChecker struct { + lock sync.Mutex + progressNotifySupported *bool + checkingEndpoint map[string]struct{} +} + +func newDefaultFeatureSupportChecker() *defaultFeatureSupportChecker { + return &defaultFeatureSupportChecker{ + checkingEndpoint: make(map[string]struct{}), + } +} + +// Supports can check the feature from anywhere, without storage, if it was cached before. +func (f *defaultFeatureSupportChecker) Supports(feature storage.Feature) bool { + switch feature { + case storage.RequestWatchProgress: + f.lock.Lock() + defer f.lock.Unlock() + + return ptr.Deref(f.progressNotifySupported, false) + default: + runtime.HandleError(fmt.Errorf("feature %q is not implemented in DefaultFeatureSupportChecker", feature)) + return false + } +} + +// CheckClient accepts a client, calculates the support per endpoint, and caches it. +func (f *defaultFeatureSupportChecker) CheckClient(ctx context.Context, c client, feature storage.Feature) { + switch feature { + case storage.RequestWatchProgress: + f.checkClient(ctx, c) + default: + runtime.HandleError(fmt.Errorf("feature %q is not implemented in DefaultFeatureSupportChecker", feature)) + } +} + +func (f *defaultFeatureSupportChecker) checkClient(ctx context.Context, c client) { + // Start with 10 ms, multiply by 2 at each step, up to 15 s, and stay at 15 seconds thereafter.
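// Editorial note, not part of the patch: assuming no jitter, the wait.Backoff
// parameters below (Duration=10ms, Factor=2, Steps=11, Cap=15s) produce retry
// delays of 10ms, 20ms, 40ms, ... doubling up to 10.24s over the first eleven
// attempts; every attempt after that waits the 15s cap.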
+ delayFunc := wait.Backoff{ + Duration: 10 * time.Millisecond, + Cap: 15 * time.Second, + Factor: 2.0, + Steps: 11}.DelayFunc() + f.lock.Lock() + defer f.lock.Unlock() + for _, ep := range c.Endpoints() { + if _, found := f.checkingEndpoint[ep]; found { + continue + } + f.checkingEndpoint[ep] = struct{}{} + go func(ep string) { + defer runtime.HandleCrash() + err := delayFunc.Until(ctx, true, true, func(ctx context.Context) (done bool, err error) { + internalErr := f.clientSupportsRequestWatchProgress(ctx, c, ep) + return internalErr == nil, nil + }) + if err != nil { + klog.ErrorS(err, "Failed to check if RequestWatchProgress is supported by etcd after retrying") + } + }(ep) + } +} + +func (f *defaultFeatureSupportChecker) clientSupportsRequestWatchProgress(ctx context.Context, c client, ep string) error { + supported, err := endpointSupportsRequestWatchProgress(ctx, c, ep) + if err != nil { + return err + } + f.lock.Lock() + defer f.lock.Unlock() + + if !supported { + klog.Infof("RequestWatchProgress feature is not supported by %q endpoint", ep) + f.progressNotifySupported = ptr.To(false) + return nil + } + if f.progressNotifySupported == nil { + f.progressNotifySupported = ptr.To(true) + } + return nil +} + +// Sub-interface of the etcd client. +type client interface { + // Endpoints returns the list of endpoints in the etcd client. + Endpoints() []string + // Status retrieves the status information from the etcd client connected to the specified endpoint. + // It takes a context.Context parameter for cancellation or timeout control. + // It returns a clientv3.StatusResponse containing the status information or an error if the operation fails. + Status(ctx context.Context, endpoint string) (*clientv3.StatusResponse, error) +} + +// endpointSupportsRequestWatchProgress evaluates whether RequestWatchProgress is supported by the current version of the etcd endpoint. +// Based on these issues: +// - https://github.com/etcd-io/etcd/issues/15220 - Fixed in etcd v3.4.25+ and v3.5.8+ +// - https://github.com/etcd-io/etcd/issues/17507 - Fixed in etcd v3.4.31+ and v3.5.13+ +func endpointSupportsRequestWatchProgress(ctx context.Context, c client, endpoint string) (bool, error) { + resp, err := c.Status(ctx, endpoint) + if err != nil { + return false, fmt.Errorf("failed checking etcd version, endpoint: %q: %w", endpoint, err) + } + ver, err := version.ParseSemantic(resp.Version) + if err != nil { + // Assume feature is not supported if etcd version cannot be parsed. + klog.ErrorS(err, "Failed to parse etcd version", "version", resp.Version) + return false, nil + } + if ver.LessThan(v3_4_31) || ver.AtLeast(v3_5_0) && ver.LessThan(v3_5_13) { + return false, nil + } + return true, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go index 5489660809..cff804b285 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go @@ -29,6 +29,12 @@ import ( "k8s.io/apimachinery/pkg/watch" ) +// Feature is the name of each feature in storage that we check in feature_support_checker. +type Feature = string + +// RequestWatchProgress is an etcd feature whose support can be checked via the feature support checker. +var RequestWatchProgress Feature = "RequestWatchProgress" + // Versioner abstracts setting and retrieving metadata fields from database response // onto the object or list.
It is required to maintain storage invariants - updating an // object twice with the same data except for the ResourceVersion and SelfLink must be @@ -237,6 +243,9 @@ type Interface interface { // Count returns number of different entries under the key (generally being path prefix). Count(key string) (int64, error) + // ReadinessCheck checks if the storage is ready for accepting requests. + ReadinessCheck() error + // RequestWatchProgress requests the a watch stream progress status be sent in the // watch response stream as soon as possible. // Used for monitor watch progress even if watching resources with no changes. diff --git a/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go b/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go index e652845c28..480b5a8934 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go +++ b/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go @@ -118,7 +118,7 @@ func (s *SelectionPredicate) MatchesObjectAttributes(l labels.Set, f fields.Set) // MatchesSingleNamespace will return (namespace, true) if and only if s.Field matches on the object's // namespace. func (s *SelectionPredicate) MatchesSingleNamespace() (string, bool) { - if len(s.Continue) > 0 { + if len(s.Continue) > 0 || s.Field == nil { return "", false } if namespace, ok := s.Field.RequiresExactMatch("metadata.namespace"); ok { @@ -130,7 +130,7 @@ func (s *SelectionPredicate) MatchesSingleNamespace() (string, bool) { // MatchesSingle will return (name, true) if and only if s.Field matches on the object's // name. func (s *SelectionPredicate) MatchesSingle() (string, bool) { - if len(s.Continue) > 0 { + if len(s.Continue) > 0 || s.Field == nil { return "", false } // TODO: should be namespace.name diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go index 93b1e707f6..822470778d 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go @@ -20,6 +20,7 @@ import ( "time" oteltrace "go.opentelemetry.io/otel/trace" + noopoteltrace "go.opentelemetry.io/otel/trace/noop" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -117,6 +118,6 @@ func NewDefaultConfig(prefix string, codec runtime.Codec) *Config { HealthcheckTimeout: DefaultHealthcheckTimeout, ReadycheckTimeout: DefaultReadinessTimeout, LeaseManagerConfig: etcd3.NewDefaultLeaseManagerConfig(), - Transport: TransportConfig{TracerProvider: oteltrace.NewNoopTracerProvider()}, + Transport: TransportConfig{TracerProvider: noopoteltrace.NewTracerProvider()}, } } diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index 2aab5c76d2..4f3aad9f96 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -317,6 +317,7 @@ var newETCD3Client = func(c storagebackend.TransportConfig) (*clientv3.Client, e } if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) { tracingOpts := []otelgrpc.Option{ + otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents), otelgrpc.WithPropagators(tracing.Propagators()), otelgrpc.WithTracerProvider(c.TracerProvider), } diff --git a/vendor/k8s.io/apiserver/pkg/storage/util.go b/vendor/k8s.io/apiserver/pkg/storage/util.go index 6d5fb36d24..1aca351d99 100644 --- 
a/vendor/k8s.io/apiserver/pkg/storage/util.go +++ b/vendor/k8s.io/apiserver/pkg/storage/util.go @@ -24,18 +24,12 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/validation/path" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" ) -const ( - // initialEventsAnnotationKey the name of the key - // under which an annotation marking the end of list stream - // is kept. - initialEventsAnnotationKey = "k8s.io/initial-events-end" -) - type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error) // SimpleUpdateFunc converts SimpleUpdateFunc into UpdateFunc @@ -46,10 +40,6 @@ func SimpleUpdate(fn SimpleUpdateFunc) UpdateFunc { } } -func EverythingFunc(runtime.Object) bool { - return true -} - func NamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { meta, err := meta.Accessor(obj) if err != nil { @@ -144,7 +134,7 @@ func AnnotateInitialEventsEndBookmark(obj runtime.Object) error { if objAnnotations == nil { objAnnotations = map[string]string{} } - objAnnotations[initialEventsAnnotationKey] = "true" + objAnnotations[metav1.InitialEventsAnnotationKey] = "true" objMeta.SetAnnotations(objAnnotations) return nil } @@ -157,5 +147,5 @@ func HasInitialEventsEndBookmarkAnnotation(obj runtime.Object) (bool, error) { return false, err } objAnnotations := objMeta.GetAnnotations() - return objAnnotations[initialEventsAnnotationKey] == "true", nil + return objAnnotations[metav1.InitialEventsAnnotationKey] == "true", nil } diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go index 5911b7568c..00a9e099ba 100644 --- a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go +++ b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -24,8 +24,8 @@ var ( // DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate. // Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this. // Tests that need to modify feature gates for the duration of their test should use: - // defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features., )() - DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate() + // featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features., ) + DefaultMutableFeatureGate featuregate.MutableVersionedFeatureGate = featuregate.NewFeatureGate() // DefaultFeatureGate is a shared global FeatureGate. // Top-level commands/options setup that needs to modify this feature gate should use DefaultMutableFeatureGate. diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go index d40cae509d..c707884e13 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -135,7 +135,7 @@ type configController struct { // configQueue holds `(interface{})(0)` when the configuration // objects need to be reprocessed. - configQueue workqueue.RateLimitingInterface + configQueue workqueue.TypedRateLimitingInterface[int] plLister flowcontrollister.PriorityLevelConfigurationLister plInformerSynced cache.InformerSynced @@ -204,7 +204,7 @@ type priorityLevelState struct { // reached through this pointer is mutable. 
pl *flowcontrol.PriorityLevelConfiguration - // qsCompleter holds the QueueSetCompleter derived from `config` + // qsCompleter holds the QueueSetCompleter derived from `pl` // and `queues`. qsCompleter fq.QueueSetCompleter @@ -255,12 +255,12 @@ type priorityLevelState struct { type seatDemandStats struct { avg float64 stdDev float64 - highWatermark float64 + highWatermark int smoothed float64 } func (stats *seatDemandStats) update(obs fq.IntegratorResults) { - stats.highWatermark = obs.Max + stats.highWatermark = int(math.Round(obs.Max)) if obs.Duration <= 0 { return } @@ -292,7 +292,10 @@ func newTestableController(config TestableConfig) *configController { klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.name, cfgCtlr.asFieldManager) // Start with longish delay because conflicts will be between // different processes, so take some time to go away. - cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue") + cfgCtlr.configQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemExponentialFailureRateLimiter[int](200*time.Millisecond, 8*time.Hour), + workqueue.TypedRateLimitingQueueConfig[int]{Name: "priority_and_fairness_config_queue"}, + ) // ensure the data structure reflects the mandatory config cfgCtlr.lockAndDigestConfigObjects(nil, nil) fci := config.InformerFactory.Flowcontrol().V1() @@ -395,38 +398,63 @@ func (cfgCtlr *configController) updateBorrowing() { func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plStates map[string]*priorityLevelState) { items := make([]allocProblemItem, 0, len(plStates)) - plNames := make([]string, 0, len(plStates)) + nonExemptPLNames := make([]string, 0, len(plStates)) + idxOfNonExempt := map[string]int{} // items index of non-exempt classes + cclOfExempt := map[string]int{} // minCurrentCL of exempt classes + var minCLSum, minCurrentCLSum int // sums over non-exempt classes + remainingServerCL := cfgCtlr.nominalCLSum for plName, plState := range plStates { obs := plState.seatDemandIntegrator.Reset() plState.seatDemandStats.update(obs) - // Lower bound on this priority level's adjusted concurreny limit is the lesser of: - // - its seat demamd high watermark over the last adjustment period, and - // - its configured concurrency limit. - // BUT: we do not want this to be lower than the lower bound from configuration. - // See KEP-1040 for a more detailed explanation. - minCurrentCL := math.Max(float64(plState.minCL), math.Min(float64(plState.nominalCL), plState.seatDemandStats.highWatermark)) - plNames = append(plNames, plName) - items = append(items, allocProblemItem{ - lowerBound: minCurrentCL, - upperBound: float64(plState.maxCL), - target: math.Max(minCurrentCL, plState.seatDemandStats.smoothed), - }) + var minCurrentCL int + if plState.pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt { + minCurrentCL = max(plState.minCL, plState.seatDemandStats.highWatermark) + cclOfExempt[plName] = minCurrentCL + remainingServerCL -= minCurrentCL + } else { + // Lower bound on this priority level's adjusted concurreny limit is the lesser of: + // - its seat demamd high watermark over the last adjustment period, and + // - its configured concurrency limit. + // BUT: we do not want this to be lower than the lower bound from configuration. 
+ // See KEP-1040 for a more detailed explanation. + minCurrentCL = max(plState.minCL, min(plState.nominalCL, plState.seatDemandStats.highWatermark)) + idxOfNonExempt[plName] = len(items) + nonExemptPLNames = append(nonExemptPLNames, plName) + items = append(items, allocProblemItem{ + lowerBound: float64(minCurrentCL), + upperBound: float64(plState.maxCL), + target: math.Max(float64(minCurrentCL), plState.seatDemandStats.smoothed), + }) + minCLSum += plState.minCL + minCurrentCLSum += minCurrentCL + } } if len(items) == 0 && cfgCtlr.nominalCLSum > 0 { klog.ErrorS(nil, "Impossible: no priority levels", "plStates", cfgCtlr.priorityLevelStates) return } - allocs, fairFrac, err := computeConcurrencyAllocation(cfgCtlr.nominalCLSum, items) - if err != nil { - klog.ErrorS(err, "Unable to derive new concurrency limits", "plNames", plNames, "items", items) - allocs = make([]float64, len(items)) - for idx, plName := range plNames { - plState := plStates[plName] - allocs[idx] = float64(plState.currentCL) + var allocs []float64 + var shareFrac, fairFrac float64 + var err error + if remainingServerCL <= minCLSum { + metrics.SetFairFrac(0) + } else if remainingServerCL <= minCurrentCLSum { + shareFrac = float64(remainingServerCL-minCLSum) / float64(minCurrentCLSum-minCLSum) + metrics.SetFairFrac(0) + } else { + allocs, fairFrac, err = computeConcurrencyAllocation(cfgCtlr.nominalCLSum, items) + if err != nil { + klog.ErrorS(err, "Unable to derive new concurrency limits", "plNames", nonExemptPLNames, "items", items) + allocs = make([]float64, len(items)) + for idx, plName := range nonExemptPLNames { + plState := plStates[plName] + allocs[idx] = float64(plState.currentCL) + } } + metrics.SetFairFrac(float64(fairFrac)) } - for idx, plName := range plNames { - plState := plStates[plName] + for plName, plState := range plStates { + idx, isNonExempt := idxOfNonExempt[plName] if setCompleters { qsCompleter, err := queueSetCompleterForPL(cfgCtlr.queueSetFactory, plState.queues, plState.pl, plState.reqsGaugePair, plState.execSeatsObs, @@ -437,10 +465,20 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plSta } plState.qsCompleter = qsCompleter } - currentCL := int(math.Round(float64(allocs[idx]))) + var currentCL int + if !isNonExempt { + currentCL = cclOfExempt[plName] + } else if remainingServerCL <= minCLSum { + currentCL = plState.minCL + } else if remainingServerCL <= minCurrentCLSum { + minCurrentCL := max(plState.minCL, min(plState.nominalCL, plState.seatDemandStats.highWatermark)) + currentCL = plState.minCL + int(math.Round(float64(minCurrentCL-plState.minCL)*shareFrac)) + } else { + currentCL = int(math.Round(float64(allocs[idx]))) + } relChange := relDiff(float64(currentCL), float64(plState.currentCL)) plState.currentCL = currentCL - metrics.NotePriorityLevelConcurrencyAdjustment(plState.pl.Name, plState.seatDemandStats.highWatermark, plState.seatDemandStats.avg, plState.seatDemandStats.stdDev, plState.seatDemandStats.smoothed, float64(items[idx].target), currentCL) + metrics.NotePriorityLevelConcurrencyAdjustment(plState.pl.Name, float64(plState.seatDemandStats.highWatermark), plState.seatDemandStats.avg, plState.seatDemandStats.stdDev, plState.seatDemandStats.smoothed, float64(items[idx].target), currentCL) logLevel := klog.Level(4) if relChange >= 0.05 { logLevel = 2 @@ -455,7 +493,6 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plSta klog.V(logLevel).InfoS("Update CurrentCL", "plName", plName, "seatDemandHighWatermark", 
plState.seatDemandStats.highWatermark, "seatDemandAvg", plState.seatDemandStats.avg, "seatDemandStdev", plState.seatDemandStats.stdDev, "seatDemandSmoothed", plState.seatDemandStats.smoothed, "fairFrac", fairFrac, "currentCL", currentCL, "concurrencyDenominator", concurrencyDenominator, "backstop", err != nil) plState.queues = plState.qsCompleter.Complete(fq.DispatchingConfig{ConcurrencyLimit: currentCL, ConcurrencyDenominator: concurrencyDenominator}) } - metrics.SetFairFrac(float64(fairFrac)) } // runWorker is the logic of the one and only worker goroutine. We @@ -474,7 +511,7 @@ func (cfgCtlr *configController) processNextWorkItem() bool { return false } - func(obj interface{}) { + func(obj int) { defer cfgCtlr.configQueue.Done(obj) specificDelay, err := cfgCtlr.syncOne() switch { diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go index 6b941cb7fe..a9e76141e9 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go @@ -25,6 +25,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" apirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/storage" + etcdfeature "k8s.io/apiserver/pkg/storage/feature" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" ) @@ -165,15 +167,16 @@ func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool { resourceVersion := opts.ResourceVersion match := opts.ResourceVersionMatch consistentListFromCacheEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) + requestWatchProgressSupported := etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress) // Serve consistent reads from storage if ConsistentListFromCache is disabled - consistentReadFromStorage := resourceVersion == "" && !consistentListFromCacheEnabled + consistentReadFromStorage := resourceVersion == "" && !(consistentListFromCacheEnabled && requestWatchProgressSupported) // Watch cache doesn't support continuations, so serve them from etcd. hasContinuation := len(opts.Continue) > 0 - // Serve paginated requests about revision "0" from watch cache to avoid overwhelming etcd. - hasLimit := opts.Limit > 0 && resourceVersion != "0" // Watch cache only supports ResourceVersionMatchNotOlderThan (default). - unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan + // see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-get-and-list + isLegacyExactMatch := opts.Limit > 0 && match == "" && len(resourceVersion) > 0 && resourceVersion != "0" + unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan || isLegacyExactMatch - return consistentReadFromStorage || hasContinuation || hasLimit || unsupportedMatch + return consistentReadFromStorage || hasContinuation || unsupportedMatch } diff --git a/vendor/k8s.io/apiserver/pkg/util/proxy/doc.go b/vendor/k8s.io/apiserver/pkg/util/proxy/doc.go new file mode 100644 index 0000000000..d24c9423ca --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/proxy/doc.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Among other files, this directory contains functionality for two +// stream proxies: streamtranslator.go and streamtunnel.go. Both of +// these proxies allow the inter-connection of WebSocket and SPDY +// streaming connections. +// +// The stream translator proxy is used for the RemoteCommand +// subprotocol (e.g. kubectl exec, cp, and attach), and it connects +// the output streams of a WebSocket connection (e.g. STDIN, STDOUT, +// STDERR, TTY resize, and error streams) to the input streams of a +// SPDY connection. +// +// The stream tunnel proxy tunnels SPDY frames through a WebSocket +// connection, and it is used for the PortForward subprotocol (e.g. +// kubectl port-forward). This proxy implements tunneling by transparently +// encoding and decoding SPDY framed data into and out of the payload of a +// WebSocket data frame. The primary structure for this tunneling is +// the TunnelingConnection. A lot of the other code in streamtunnel.go +// is for properly upgrading both the upstream SPDY connection and the +// downstream WebSocket connection before streaming begins. +package proxy // import "k8s.io/apiserver/pkg/util/proxy" diff --git a/vendor/k8s.io/apiserver/pkg/util/proxy/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/util/proxy/metrics/metrics.go index 0fb9e7f9cd..e7e83c1718 100644 --- a/vendor/k8s.io/apiserver/pkg/util/proxy/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/util/proxy/metrics/metrics.go @@ -33,7 +33,7 @@ var registerMetricsOnce sync.Once var ( // streamTranslatorRequestsTotal counts the number of requests that were handled by - // the StreamTranslatorProxy. + // the StreamTranslatorProxy (RemoteCommand subprotocol). streamTranslatorRequestsTotal = metrics.NewCounterVec( &metrics.CounterOpts{ Subsystem: subsystem, @@ -43,19 +43,37 @@ var ( }, []string{statuscode}, ) + // streamTunnelRequestsTotal counts the number of requests that were handled by + // the StreamTunnelProxy (PortForward subprotocol). + streamTunnelRequestsTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: subsystem, + Name: "stream_tunnel_requests_total", + Help: "Total number of requests that were handled by the StreamTunnelProxy, which processes streaming PortForward/V2", + StabilityLevel: metrics.ALPHA, + }, + []string{statuscode}, + ) ) func Register() { registerMetricsOnce.Do(func() { legacyregistry.MustRegister(streamTranslatorRequestsTotal) + legacyregistry.MustRegister(streamTunnelRequestsTotal) }) } func ResetForTest() { streamTranslatorRequestsTotal.Reset() + streamTunnelRequestsTotal.Reset() } // IncStreamTranslatorRequest increments the # of requests handled by the StreamTranslatorProxy. func IncStreamTranslatorRequest(ctx context.Context, status string) { streamTranslatorRequestsTotal.WithContext(ctx).WithLabelValues(status).Add(1) } + +// IncStreamTunnelRequest increments the # of requests handled by the StreamTunnelProxy. 
+func IncStreamTunnelRequest(ctx context.Context, status string) { + streamTunnelRequestsTotal.WithContext(ctx).WithLabelValues(status).Add(1) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/proxy/streamtunnel.go b/vendor/k8s.io/apiserver/pkg/util/proxy/streamtunnel.go index 55592e7d41..7a7b92adaf 100644 --- a/vendor/k8s.io/apiserver/pkg/util/proxy/streamtunnel.go +++ b/vendor/k8s.io/apiserver/pkg/util/proxy/streamtunnel.go @@ -19,10 +19,12 @@ package proxy import ( "bufio" "bytes" + "context" "errors" "fmt" "net" "net/http" + "strconv" "strings" "sync" "time" @@ -34,6 +36,7 @@ import ( "k8s.io/apimachinery/pkg/util/httpstream/wsstream" utilnet "k8s.io/apimachinery/pkg/util/net" constants "k8s.io/apimachinery/pkg/util/portforward" + "k8s.io/apiserver/pkg/util/proxy/metrics" "k8s.io/client-go/tools/portforward" "k8s.io/klog/v2" ) @@ -61,6 +64,7 @@ func (h *TunnelingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { spdyProtocols := spdyProtocolsFromWebsocketProtocols(req) if len(spdyProtocols) == 0 { + metrics.IncStreamTunnelRequest(req.Context(), strconv.Itoa(http.StatusBadRequest)) http.Error(w, "unable to upgrade: no tunneling spdy protocols provided", http.StatusBadRequest) return } @@ -326,6 +330,7 @@ func (u *tunnelingWebsocketUpgraderConn) InitializeWrite(backendResponse *http.R if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(spdy.HeaderSpdy31)) { klog.Errorf("unable to upgrade: missing upgrade headers in response: %#v", backendResponse.Header) u.err = fmt.Errorf("unable to upgrade: missing upgrade headers in response") + metrics.IncStreamTunnelRequest(context.Background(), strconv.Itoa(http.StatusInternalServerError)) http.Error(u.w, u.err.Error(), http.StatusInternalServerError) return u.err } @@ -347,16 +352,20 @@ func (u *tunnelingWebsocketUpgraderConn) InitializeWrite(backendResponse *http.R conn, err := upgrader.Upgrade(u.w, u.req, nil) if err != nil { klog.Errorf("error upgrading websocket connection: %v", err) + metrics.IncStreamTunnelRequest(context.Background(), strconv.Itoa(http.StatusInternalServerError)) u.err = err return u.err } klog.V(4).Infof("websocket connection created: %s", conn.Subprotocol()) + metrics.IncStreamTunnelRequest(context.Background(), strconv.Itoa(http.StatusSwitchingProtocols)) u.conn = portforward.NewTunnelingConnection("server", conn) return nil } // anything other than an upgrade should pass through the backend response + klog.Errorf("SPDY upgrade failed: %s", backendResponse.Status) + metrics.IncStreamTunnelRequest(context.Background(), strconv.Itoa(backendResponse.StatusCode)) // try to hijack conn, _, err = u.w.(http.Hijacker).Hijack() diff --git a/vendor/k8s.io/apiserver/pkg/util/proxy/websocket.go b/vendor/k8s.io/apiserver/pkg/util/proxy/websocket.go index 3b9746b3b2..798ce17672 100644 --- a/vendor/k8s.io/apiserver/pkg/util/proxy/websocket.go +++ b/vendor/k8s.io/apiserver/pkg/util/proxy/websocket.go @@ -79,7 +79,7 @@ func webSocketServerStreams(req *http.Request, w http.ResponseWriter, opts Optio if p := recover(); p != nil { // Standard panic logging. for _, fn := range runtime.PanicHandlers { - fn(p) + fn(req.Context(), p) } } }() diff --git a/vendor/k8s.io/apiserver/pkg/util/version/registry.go b/vendor/k8s.io/apiserver/pkg/util/version/registry.go new file mode 100644 index 0000000000..0db9c48a6c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/version/registry.go @@ -0,0 +1,454 @@ +/* +Copyright 2024 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "sort" + "strings" + "sync" + + "github.com/spf13/pflag" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/version" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/featuregate" + "k8s.io/klog/v2" +) + +// DefaultComponentGlobalsRegistry is the global var to store the effective versions and feature gates for all components for easy access. +// Example usage: +// // register the component effective version and feature gate first +// _, _ = utilversion.DefaultComponentGlobalsRegistry.ComponentGlobalsOrRegister(utilversion.DefaultKubeComponent, utilversion.DefaultKubeEffectiveVersion(), utilfeature.DefaultMutableFeatureGate) +// wardleEffectiveVersion := utilversion.NewEffectiveVersion("1.2") +// wardleFeatureGate := featuregate.NewFeatureGate() +// utilruntime.Must(utilversion.DefaultComponentGlobalsRegistry.Register(apiserver.WardleComponentName, wardleEffectiveVersion, wardleFeatureGate, false)) +// +// cmd := &cobra.Command{ +// ... +// // call DefaultComponentGlobalsRegistry.Set() in PersistentPreRunE +// PersistentPreRunE: func(*cobra.Command, []string) error { +// if err := utilversion.DefaultComponentGlobalsRegistry.Set(); err != nil { +// return err +// } +// ... +// }, +// RunE: func(c *cobra.Command, args []string) error { +// // call utilversion.DefaultComponentGlobalsRegistry.Validate() somewhere +// }, +// } +// +// flags := cmd.Flags() +// // add flags +// utilversion.DefaultComponentGlobalsRegistry.AddFlags(flags) +var DefaultComponentGlobalsRegistry ComponentGlobalsRegistry = NewComponentGlobalsRegistry() + +const ( + DefaultKubeComponent = "kube" + + klogLevel = 2 +) + +type VersionMapping func(from *version.Version) *version.Version + +// ComponentGlobals stores the global variables for a component for easy access. +type ComponentGlobals struct { + effectiveVersion MutableEffectiveVersion + featureGate featuregate.MutableVersionedFeatureGate + + // emulationVersionMapping contains the mapping from the emulation version of this component + // to the emulation version of another component. + emulationVersionMapping map[string]VersionMapping + // dependentEmulationVersion stores whether or not this component's EmulationVersion is dependent through mapping on another component. + // If true, the emulation version cannot be set from the flag, or version mapping from another component. + dependentEmulationVersion bool + // minCompatibilityVersionMapping contains the mapping from the min compatibility version of this component + // to the min compatibility version of another component. + minCompatibilityVersionMapping map[string]VersionMapping + // dependentMinCompatibilityVersion stores whether or not this component's MinCompatibilityVersion is dependent through mapping on another component + // If true, the min compatibility version cannot be set from the flag, or version mapping from another component. 
+ dependentMinCompatibilityVersion bool +} + +type ComponentGlobalsRegistry interface { + // EffectiveVersionFor returns the EffectiveVersion registered under the component. + // Returns nil if the component is not registered. + EffectiveVersionFor(component string) EffectiveVersion + // FeatureGateFor returns the FeatureGate registered under the component. + // Returns nil if the component is not registered. + FeatureGateFor(component string) featuregate.FeatureGate + // Register registers the EffectiveVersion and FeatureGate for a component. + // returns error if the component is already registered. + Register(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error + // ComponentGlobalsOrRegister would return the registered global variables for the component if it already exists in the registry. + // Otherwise, the provided variables would be registered under the component, and the same variables would be returned. + ComponentGlobalsOrRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) (MutableEffectiveVersion, featuregate.MutableVersionedFeatureGate) + // AddFlags adds flags of "--emulated-version" and "--feature-gates" + AddFlags(fs *pflag.FlagSet) + // Set sets the flags for all global variables for all components registered. + Set() error + // SetFallback calls Set() if it has never been called. + SetFallback() error + // Validate calls the Validate() function for all the global variables for all components registered. + Validate() []error + // Reset removes all stored ComponentGlobals, configurations, and version mappings. + Reset() + // SetEmulationVersionMapping sets the mapping from the emulation version of one component + // to the emulation version of another component. + // Once set, the emulation version of the toComponent will be determined by the emulation version of the fromComponent, + // and cannot be set from cmd flags anymore. + // For a given component, its emulation version can only depend on one other component, no multiple dependency is allowed. + SetEmulationVersionMapping(fromComponent, toComponent string, f VersionMapping) error +} + +type componentGlobalsRegistry struct { + componentGlobals map[string]*ComponentGlobals + mutex sync.RWMutex + // list of component name to emulation version set from the flag. + emulationVersionConfig []string + // map of component name to the list of feature gates set from the flag. + featureGatesConfig map[string][]string + // set stores if the Set() function for the registry is already called. 
+ set bool +} + +func NewComponentGlobalsRegistry() *componentGlobalsRegistry { + return &componentGlobalsRegistry{ + componentGlobals: make(map[string]*ComponentGlobals), + emulationVersionConfig: nil, + featureGatesConfig: nil, + } +} + +func (r *componentGlobalsRegistry) Reset() { + r.mutex.Lock() + defer r.mutex.Unlock() + r.componentGlobals = make(map[string]*ComponentGlobals) + r.emulationVersionConfig = nil + r.featureGatesConfig = nil + r.set = false +} + +func (r *componentGlobalsRegistry) EffectiveVersionFor(component string) EffectiveVersion { + r.mutex.RLock() + defer r.mutex.RUnlock() + globals, ok := r.componentGlobals[component] + if !ok { + return nil + } + return globals.effectiveVersion +} + +func (r *componentGlobalsRegistry) FeatureGateFor(component string) featuregate.FeatureGate { + r.mutex.RLock() + defer r.mutex.RUnlock() + globals, ok := r.componentGlobals[component] + if !ok { + return nil + } + return globals.featureGate +} + +func (r *componentGlobalsRegistry) unsafeRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error { + if _, ok := r.componentGlobals[component]; ok { + return fmt.Errorf("component globals of %s already registered", component) + } + if featureGate != nil { + if err := featureGate.SetEmulationVersion(effectiveVersion.EmulationVersion()); err != nil { + return err + } + } + c := ComponentGlobals{ + effectiveVersion: effectiveVersion, + featureGate: featureGate, + emulationVersionMapping: make(map[string]VersionMapping), + minCompatibilityVersionMapping: make(map[string]VersionMapping), + } + r.componentGlobals[component] = &c + return nil +} + +func (r *componentGlobalsRegistry) Register(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error { + if effectiveVersion == nil { + return fmt.Errorf("cannot register nil effectiveVersion") + } + r.mutex.Lock() + defer r.mutex.Unlock() + return r.unsafeRegister(component, effectiveVersion, featureGate) +} + +func (r *componentGlobalsRegistry) ComponentGlobalsOrRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) (MutableEffectiveVersion, featuregate.MutableVersionedFeatureGate) { + r.mutex.Lock() + defer r.mutex.Unlock() + globals, ok := r.componentGlobals[component] + if ok { + return globals.effectiveVersion, globals.featureGate + } + utilruntime.Must(r.unsafeRegister(component, effectiveVersion, featureGate)) + return effectiveVersion, featureGate +} + +func (r *componentGlobalsRegistry) unsafeKnownFeatures() []string { + var known []string + for component, globals := range r.componentGlobals { + if globals.featureGate == nil { + continue + } + for _, f := range globals.featureGate.KnownFeatures() { + known = append(known, component+":"+f) + } + } + sort.Strings(known) + return known +} + +func (r *componentGlobalsRegistry) unsafeVersionFlagOptions(isEmulation bool) []string { + var vs []string + for component, globals := range r.componentGlobals { + binaryVer := globals.effectiveVersion.BinaryVersion() + if isEmulation { + if globals.dependentEmulationVersion { + continue + } + // emulated version could be between binaryMajor.{binaryMinor} and binaryMajor.{binaryMinor} + // TODO: change to binaryMajor.{binaryMinor-1} and binaryMajor.{binaryMinor} in 1.32 + vs = append(vs, fmt.Sprintf("%s=%s..%s (default=%s)", component, + binaryVer.SubtractMinor(0).String(), binaryVer.String(), 
globals.effectiveVersion.EmulationVersion().String())) + } else { + if globals.dependentMinCompatibilityVersion { + continue + } + // min compatibility version could be between binaryMajor.{binaryMinor-1} and binaryMajor.{binaryMinor} + vs = append(vs, fmt.Sprintf("%s=%s..%s (default=%s)", component, + binaryVer.SubtractMinor(1).String(), binaryVer.String(), globals.effectiveVersion.MinCompatibilityVersion().String())) + } + } + sort.Strings(vs) + return vs +} + +func (r *componentGlobalsRegistry) AddFlags(fs *pflag.FlagSet) { + if r == nil { + return + } + r.mutex.Lock() + defer r.mutex.Unlock() + for _, globals := range r.componentGlobals { + if globals.featureGate != nil { + globals.featureGate.Close() + } + } + if r.emulationVersionConfig != nil || r.featureGatesConfig != nil { + klog.Warning("calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags") + } + r.emulationVersionConfig = []string{} + r.featureGatesConfig = make(map[string][]string) + + fs.StringSliceVar(&r.emulationVersionConfig, "emulated-version", r.emulationVersionConfig, ""+ + "The versions different components emulate their capabilities (APIs, features, ...) of.\n"+ + "If set, the component will emulate the behavior of this version instead of the underlying binary version.\n"+ + "Version format could only be major.minor, for example: '--emulated-version=wardle=1.2,kube=1.31'. Options are:\n"+strings.Join(r.unsafeVersionFlagOptions(true), "\n")+ + "If the component is not specified, defaults to \"kube\"") + + fs.Var(cliflag.NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey(&r.featureGatesConfig), "feature-gates", "Comma-separated list of component:key=value pairs that describe feature gates for alpha/experimental features of different components.\n"+ + "If the component is not specified, defaults to \"kube\". This flag can be repeatedly invoked. For example: --feature-gates 'wardle:featureA=true,wardle:featureB=false' --feature-gates 'kube:featureC=true'"+ + "Options are:\n"+strings.Join(r.unsafeKnownFeatures(), "\n")) +} + +type componentVersion struct { + component string + ver *version.Version +} + +// getFullEmulationVersionConfig expands the given version config with version registered version mapping, +// and returns the map of component to Version. 
+func (r *componentGlobalsRegistry) getFullEmulationVersionConfig( + versionConfigMap map[string]*version.Version) (map[string]*version.Version, error) { + result := map[string]*version.Version{} + setQueue := []componentVersion{} + for comp, ver := range versionConfigMap { + if _, ok := r.componentGlobals[comp]; !ok { + return result, fmt.Errorf("component not registered: %s", comp) + } + klog.V(klogLevel).Infof("setting version %s=%s", comp, ver.String()) + setQueue = append(setQueue, componentVersion{comp, ver}) + } + for len(setQueue) > 0 { + cv := setQueue[0] + if _, visited := result[cv.component]; visited { + return result, fmt.Errorf("setting version of %s more than once, probably version mapping loop", cv.component) + } + setQueue = setQueue[1:] + result[cv.component] = cv.ver + for toComp, f := range r.componentGlobals[cv.component].emulationVersionMapping { + toVer := f(cv.ver) + if toVer == nil { + return result, fmt.Errorf("got nil version from mapping of %s=%s to component:%s", cv.component, cv.ver.String(), toComp) + } + klog.V(klogLevel).Infof("setting version %s=%s from version mapping of %s=%s", toComp, toVer.String(), cv.component, cv.ver.String()) + setQueue = append(setQueue, componentVersion{toComp, toVer}) + } + } + return result, nil +} + +func toVersionMap(versionConfig []string) (map[string]*version.Version, error) { + m := map[string]*version.Version{} + for _, compVer := range versionConfig { + // default to "kube" of component is not specified + k := "kube" + v := compVer + if strings.Contains(compVer, "=") { + arr := strings.SplitN(compVer, "=", 2) + if len(arr) != 2 { + return m, fmt.Errorf("malformed pair, expect string=string") + } + k = strings.TrimSpace(arr[0]) + v = strings.TrimSpace(arr[1]) + } + ver, err := version.Parse(v) + if err != nil { + return m, err + } + if ver.Patch() != 0 { + return m, fmt.Errorf("patch version not allowed, got: %s=%s", k, ver.String()) + } + if existingVer, ok := m[k]; ok { + return m, fmt.Errorf("duplicate version flag, %s=%s and %s=%s", k, existingVer.String(), k, ver.String()) + } + m[k] = ver + } + return m, nil +} + +func (r *componentGlobalsRegistry) SetFallback() error { + r.mutex.Lock() + set := r.set + r.mutex.Unlock() + if set { + return nil + } + klog.Warning("setting componentGlobalsRegistry in SetFallback. We recommend calling componentGlobalsRegistry.Set()" + + " right after parsing flags to avoid using feature gates before their final values are set by the flags.") + return r.Set() +} + +func (r *componentGlobalsRegistry) Set() error { + r.mutex.Lock() + defer r.mutex.Unlock() + r.set = true + emulationVersionConfigMap, err := toVersionMap(r.emulationVersionConfig) + if err != nil { + return err + } + for comp := range emulationVersionConfigMap { + if _, ok := r.componentGlobals[comp]; !ok { + return fmt.Errorf("component not registered: %s", comp) + } + // only components without any dependencies can be set from the flag. + if r.componentGlobals[comp].dependentEmulationVersion { + return fmt.Errorf("EmulationVersion of %s is set by mapping, cannot set it by flag", comp) + } + } + if emulationVersions, err := r.getFullEmulationVersionConfig(emulationVersionConfigMap); err != nil { + return err + } else { + for comp, ver := range emulationVersions { + r.componentGlobals[comp].effectiveVersion.SetEmulationVersion(ver) + } + } + // Set feature gate emulation version before setting feature gate flag values. 
+ for comp, globals := range r.componentGlobals { + if globals.featureGate == nil { + continue + } + klog.V(klogLevel).Infof("setting %s:feature gate emulation version to %s", comp, globals.effectiveVersion.EmulationVersion().String()) + if err := globals.featureGate.SetEmulationVersion(globals.effectiveVersion.EmulationVersion()); err != nil { + return err + } + } + for comp, fg := range r.featureGatesConfig { + if comp == "" { + if _, ok := r.featureGatesConfig[DefaultKubeComponent]; ok { + return fmt.Errorf("set kube feature gates with default empty prefix or kube: prefix consistently, do not mix use") + } + comp = DefaultKubeComponent + } + if _, ok := r.componentGlobals[comp]; !ok { + return fmt.Errorf("component not registered: %s", comp) + } + featureGate := r.componentGlobals[comp].featureGate + if featureGate == nil { + return fmt.Errorf("component featureGate not registered: %s", comp) + } + flagVal := strings.Join(fg, ",") + klog.V(klogLevel).Infof("setting %s:feature-gates=%s", comp, flagVal) + if err := featureGate.Set(flagVal); err != nil { + return err + } + } + return nil +} + +func (r *componentGlobalsRegistry) Validate() []error { + var errs []error + r.mutex.Lock() + defer r.mutex.Unlock() + for _, globals := range r.componentGlobals { + errs = append(errs, globals.effectiveVersion.Validate()...) + if globals.featureGate != nil { + errs = append(errs, globals.featureGate.Validate()...) + } + } + return errs +} + +func (r *componentGlobalsRegistry) SetEmulationVersionMapping(fromComponent, toComponent string, f VersionMapping) error { + if f == nil { + return nil + } + klog.V(klogLevel).Infof("setting EmulationVersion mapping from %s to %s", fromComponent, toComponent) + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.componentGlobals[fromComponent]; !ok { + return fmt.Errorf("component not registered: %s", fromComponent) + } + if _, ok := r.componentGlobals[toComponent]; !ok { + return fmt.Errorf("component not registered: %s", toComponent) + } + // check multiple dependency + if r.componentGlobals[toComponent].dependentEmulationVersion { + return fmt.Errorf("mapping of %s already exists from another component", toComponent) + } + r.componentGlobals[toComponent].dependentEmulationVersion = true + + versionMapping := r.componentGlobals[fromComponent].emulationVersionMapping + if _, ok := versionMapping[toComponent]; ok { + return fmt.Errorf("EmulationVersion from %s to %s already exists", fromComponent, toComponent) + } + versionMapping[toComponent] = f + klog.V(klogLevel).Infof("setting the default EmulationVersion of %s based on mapping from the default EmulationVersion of %s", fromComponent, toComponent) + defaultFromVersion := r.componentGlobals[fromComponent].effectiveVersion.EmulationVersion() + emulationVersions, err := r.getFullEmulationVersionConfig(map[string]*version.Version{fromComponent: defaultFromVersion}) + if err != nil { + return err + } + for comp, ver := range emulationVersions { + r.componentGlobals[comp].effectiveVersion.SetEmulationVersion(ver) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/util/version/version.go b/vendor/k8s.io/apiserver/pkg/util/version/version.go new file mode 100644 index 0000000000..694d27a998 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/version/version.go @@ -0,0 +1,181 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
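The `SetEmulationVersionMapping` method at the end of the registry is how a wrapping component (for example an aggregated API server) derives kube's emulation version from its own instead of letting it be set directly. A sketch under stated assumptions: the mapping function below is hypothetical (wardle 1.2 emulating kube 1.31, 1.1 emulating 1.30), and the `VersionMapping` signature is inferred from its use in `getFullEmulationVersionConfig` above.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
	utilversion "k8s.io/apiserver/pkg/util/version"
)

// wardleToKubeVersion is a hypothetical mapping: wardle 1.N is assumed to emulate kube 1.(N+29).
func wardleToKubeVersion(ver *version.Version) *version.Version {
	return version.MajorMinor(1, ver.Minor()+29)
}

func must(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	registry := utilversion.NewComponentGlobalsRegistry()
	must(registry.Register(utilversion.DefaultKubeComponent, utilversion.DefaultKubeEffectiveVersion(), nil))
	must(registry.Register("wardle", utilversion.NewEffectiveVersion("1.2"), nil))

	// Once the mapping is installed, kube's emulation version follows wardle's and
	// can no longer be set independently via --emulated-version.
	must(registry.SetEmulationVersionMapping("wardle", utilversion.DefaultKubeComponent, wardleToKubeVersion))

	// Derived from wardle's default emulation version 1.2, i.e. kube 1.31 under this mapping.
	fmt.Println(registry.EffectiveVersionFor(utilversion.DefaultKubeComponent).EmulationVersion())
}
```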
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/version" + baseversion "k8s.io/component-base/version" +) + +type EffectiveVersion interface { + BinaryVersion() *version.Version + EmulationVersion() *version.Version + MinCompatibilityVersion() *version.Version + EqualTo(other EffectiveVersion) bool + String() string + Validate() []error +} + +type MutableEffectiveVersion interface { + EffectiveVersion + Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) + SetEmulationVersion(emulationVersion *version.Version) + SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) +} + +type effectiveVersion struct { + // When true, BinaryVersion() returns the current binary version + useDefaultBuildBinaryVersion atomic.Bool + // Holds the last binary version stored in Set() + binaryVersion atomic.Pointer[version.Version] + // If the emulationVersion is set by the users, it could only contain major and minor versions. + // In tests, emulationVersion could be the same as the binary version, or set directly, + // which can have "alpha" as pre-release to continue serving expired apis while we clean up the test. + emulationVersion atomic.Pointer[version.Version] + // minCompatibilityVersion could only contain major and minor versions. + minCompatibilityVersion atomic.Pointer[version.Version] +} + +func (m *effectiveVersion) BinaryVersion() *version.Version { + if m.useDefaultBuildBinaryVersion.Load() { + return defaultBuildBinaryVersion() + } + return m.binaryVersion.Load() +} + +func (m *effectiveVersion) EmulationVersion() *version.Version { + ver := m.emulationVersion.Load() + if ver != nil { + // Emulation version can have "alpha" as pre-release to continue serving expired apis while we clean up the test. + // The pre-release should not be accessible to the users. 
+ return ver.WithPreRelease(m.BinaryVersion().PreRelease()) + } + return ver +} + +func (m *effectiveVersion) MinCompatibilityVersion() *version.Version { + return m.minCompatibilityVersion.Load() +} + +func (m *effectiveVersion) EqualTo(other EffectiveVersion) bool { + return m.BinaryVersion().EqualTo(other.BinaryVersion()) && m.EmulationVersion().EqualTo(other.EmulationVersion()) && m.MinCompatibilityVersion().EqualTo(other.MinCompatibilityVersion()) +} + +func (m *effectiveVersion) String() string { + if m == nil { + return "" + } + return fmt.Sprintf("{BinaryVersion: %s, EmulationVersion: %s, MinCompatibilityVersion: %s}", + m.BinaryVersion().String(), m.EmulationVersion().String(), m.MinCompatibilityVersion().String()) +} + +func majorMinor(ver *version.Version) *version.Version { + if ver == nil { + return ver + } + return version.MajorMinor(ver.Major(), ver.Minor()) +} + +func (m *effectiveVersion) Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) { + m.binaryVersion.Store(binaryVersion) + m.useDefaultBuildBinaryVersion.Store(false) + m.emulationVersion.Store(majorMinor(emulationVersion)) + m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion)) +} + +func (m *effectiveVersion) SetEmulationVersion(emulationVersion *version.Version) { + m.emulationVersion.Store(majorMinor(emulationVersion)) +} + +func (m *effectiveVersion) SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) { + m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion)) +} + +func (m *effectiveVersion) Validate() []error { + var errs []error + // Validate only checks the major and minor versions. + binaryVersion := m.BinaryVersion().WithPatch(0) + emulationVersion := m.emulationVersion.Load() + minCompatibilityVersion := m.minCompatibilityVersion.Load() + + // emulationVersion can only be 1.{binaryMinor-1}...1.{binaryMinor}. + maxEmuVer := binaryVersion + minEmuVer := binaryVersion.SubtractMinor(1) + if emulationVersion.GreaterThan(maxEmuVer) || emulationVersion.LessThan(minEmuVer) { + errs = append(errs, fmt.Errorf("emulation version %s is not between [%s, %s]", emulationVersion.String(), minEmuVer.String(), maxEmuVer.String())) + } + // minCompatibilityVersion can only be 1.{binaryMinor-1} for alpha. + maxCompVer := binaryVersion.SubtractMinor(1) + minCompVer := binaryVersion.SubtractMinor(1) + if minCompatibilityVersion.GreaterThan(maxCompVer) || minCompatibilityVersion.LessThan(minCompVer) { + errs = append(errs, fmt.Errorf("minCompatibilityVersion version %s is not between [%s, %s]", minCompatibilityVersion.String(), minCompVer.String(), maxCompVer.String())) + } + return errs +} + +func newEffectiveVersion(binaryVersion *version.Version, useDefaultBuildBinaryVersion bool) MutableEffectiveVersion { + effective := &effectiveVersion{} + compatVersion := binaryVersion.SubtractMinor(1) + effective.Set(binaryVersion, binaryVersion, compatVersion) + effective.useDefaultBuildBinaryVersion.Store(useDefaultBuildBinaryVersion) + return effective +} + +func NewEffectiveVersion(binaryVer string) MutableEffectiveVersion { + if binaryVer == "" { + return &effectiveVersion{} + } + binaryVersion := version.MustParse(binaryVer) + return newEffectiveVersion(binaryVersion, false) +} + +func defaultBuildBinaryVersion() *version.Version { + verInfo := baseversion.Get() + return version.MustParse(verInfo.String()).WithInfo(verInfo) +} + +// DefaultBuildEffectiveVersion returns the MutableEffectiveVersion based on the +// current build information. 
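To make the bounds in `Validate` concrete: the emulation version may trail the binary version by at most one minor, and the minimum compatibility version is pinned to exactly one minor behind. A small sketch (again, not part of the diff) exercising those checks, assuming the vendored package is imported as `utilversion`:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
	utilversion "k8s.io/apiserver/pkg/util/version"
)

func main() {
	// Binary 1.31; emulation defaults to 1.31 and minCompatibility to 1.30.
	ev := utilversion.NewEffectiveVersion("1.31")
	fmt.Println(ev)

	// One minor back is inside the allowed [1.30, 1.31] window: no errors.
	ev.SetEmulationVersion(version.MajorMinor(1, 30))
	fmt.Println(len(ev.Validate()))

	// Two minors back falls outside the window and Validate reports it.
	ev.SetEmulationVersion(version.MajorMinor(1, 29))
	for _, err := range ev.Validate() {
		fmt.Println(err)
	}
}
```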
+func DefaultBuildEffectiveVersion() MutableEffectiveVersion { + binaryVersion := defaultBuildBinaryVersion() + if binaryVersion.Major() == 0 && binaryVersion.Minor() == 0 { + return DefaultKubeEffectiveVersion() + } + return newEffectiveVersion(binaryVersion, true) +} + +// DefaultKubeEffectiveVersion returns the MutableEffectiveVersion based on the +// latest K8s release. +func DefaultKubeEffectiveVersion() MutableEffectiveVersion { + binaryVersion := version.MustParse(baseversion.DefaultKubeBinaryVersion).WithInfo(baseversion.Get()) + return newEffectiveVersion(binaryVersion, false) +} + +// ValidateKubeEffectiveVersion validates the EmulationVersion is equal to the binary version at 1.31 for kube components. +// TODO: remove in 1.32 +// emulationVersion is introduced in 1.31, so it is only allowed to be equal to the binary version at 1.31. +func ValidateKubeEffectiveVersion(effectiveVersion EffectiveVersion) error { + binaryVersion := version.MajorMinor(effectiveVersion.BinaryVersion().Major(), effectiveVersion.BinaryVersion().Minor()) + if binaryVersion.EqualTo(version.MajorMinor(1, 31)) && !effectiveVersion.EmulationVersion().EqualTo(binaryVersion) { + return fmt.Errorf("emulation version needs to be equal to binary version(%s) in compatibility-version alpha, got %s", + binaryVersion.String(), effectiveVersion.EmulationVersion().String()) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go b/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go index 695c00d176..9cf258b723 100644 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go @@ -23,8 +23,18 @@ import ( "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/transport" ) +func ValidateCABundle(fldPath *field.Path, caBundle []byte) field.ErrorList { + var allErrors field.ErrorList + _, err := transport.TLSConfigFor(&transport.Config{TLS: transport.TLSConfig{CAData: caBundle}}) + if err != nil { + allErrors = append(allErrors, field.Invalid(fldPath, caBundle, err.Error())) + } + return allErrors +} + // ValidateWebhookURL validates webhook's URL. 
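The new `ValidateCABundle` helper added to `webhook/validation.go` above simply asks client-go's transport package whether the bundle can be turned into a TLS config, and wraps any failure as a `field.Invalid` error. A hedged usage sketch with a deliberately malformed bundle (the field path is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apiserver/pkg/util/webhook"
)

func main() {
	// Not valid PEM certificate data, so no TLS config can be built from it.
	badBundle := []byte("not a PEM certificate")

	fldPath := field.NewPath("webhooks").Index(0).Child("clientConfig", "caBundle")
	for _, err := range webhook.ValidateCABundle(fldPath, badBundle) {
		fmt.Println(err)
	}
}
```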
func ValidateWebhookURL(fldPath *field.Path, URL string, forceHttps bool) field.ErrorList { var allErrors field.ErrorList diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go index d97b121453..ebc4949d92 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -32,14 +32,16 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/cache" + utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/apis/apiserver" apiservervalidation "k8s.io/apiserver/pkg/apis/apiserver/validation" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" authorizationcel "k8s.io/apiserver/pkg/authorization/cel" - "k8s.io/apiserver/pkg/features" + genericfeatures "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/apiserver/pkg/util/webhook" "k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics" @@ -195,15 +197,7 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri } if attr.IsResourceRequest() { - r.Spec.ResourceAttributes = &authorizationv1.ResourceAttributes{ - Namespace: attr.GetNamespace(), - Verb: attr.GetVerb(), - Group: attr.GetAPIGroup(), - Version: attr.GetAPIVersion(), - Resource: attr.GetResource(), - Subresource: attr.GetSubresource(), - Name: attr.GetName(), - } + r.Spec.ResourceAttributes = resourceAttributesFrom(attr) } else { r.Spec.NonResourceAttributes = &authorizationv1.NonResourceAttributes{ Path: attr.GetPath(), @@ -211,7 +205,7 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri } } // skipping match when feature is not enabled - if utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration) { + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StructuredAuthorizationConfiguration) { // Process Match Conditions before calling the webhook matches, err := w.match(ctx, r) // If at least one matchCondition evaluates to an error (but none are FALSE): @@ -251,7 +245,7 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri metricsResult = "success" case ctx.Err() != nil: metricsResult = "canceled" - case errors.Is(sarErr, context.DeadlineExceeded) || apierrors.IsTimeout(sarErr) || statusCode == http.StatusGatewayTimeout: + case utilnet.IsTimeout(sarErr) || errors.Is(sarErr, context.DeadlineExceeded) || apierrors.IsTimeout(sarErr) || statusCode == http.StatusGatewayTimeout: metricsResult = "timeout" default: metricsResult = "error" @@ -304,6 +298,109 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri } +func resourceAttributesFrom(attr authorizer.Attributes) *authorizationv1.ResourceAttributes { + ret := &authorizationv1.ResourceAttributes{ + Namespace: attr.GetNamespace(), + Verb: attr.GetVerb(), + Group: attr.GetAPIGroup(), + Version: attr.GetAPIVersion(), + Resource: attr.GetResource(), + Subresource: attr.GetSubresource(), + Name: attr.GetName(), + } + + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) { + // If we are able to get any requirements while parsing selectors, use them, even if there's an error. 
+ // This is because selectors only narrow, so if a subset of selector requirements are available, the request can be allowed. + if selectorRequirements, _ := fieldSelectorToAuthorizationAPI(attr); len(selectorRequirements) > 0 { + ret.FieldSelector = &authorizationv1.FieldSelectorAttributes{ + Requirements: selectorRequirements, + } + } + + if selectorRequirements, _ := labelSelectorToAuthorizationAPI(attr); len(selectorRequirements) > 0 { + ret.LabelSelector = &authorizationv1.LabelSelectorAttributes{ + Requirements: selectorRequirements, + } + } + } + + return ret +} + +func fieldSelectorToAuthorizationAPI(attr authorizer.Attributes) ([]metav1.FieldSelectorRequirement, error) { + requirements, getFieldSelectorErr := attr.GetFieldSelector() + if len(requirements) == 0 { + return nil, getFieldSelectorErr + } + + retRequirements := []metav1.FieldSelectorRequirement{} + for _, requirement := range requirements { + retRequirement := metav1.FieldSelectorRequirement{} + switch { + case requirement.Operator == selection.Equals || requirement.Operator == selection.DoubleEquals || requirement.Operator == selection.In: + retRequirement.Operator = metav1.FieldSelectorOpIn + retRequirement.Key = requirement.Field + retRequirement.Values = []string{requirement.Value} + case requirement.Operator == selection.NotEquals || requirement.Operator == selection.NotIn: + retRequirement.Operator = metav1.FieldSelectorOpNotIn + retRequirement.Key = requirement.Field + retRequirement.Values = []string{requirement.Value} + default: + // ignore this particular requirement. since requirements are AND'd, it is safe to ignore unknown requirements + // for authorization since the resulting check will only be as broad or broader than the intended. + continue + } + retRequirements = append(retRequirements, retRequirement) + } + + if len(retRequirements) == 0 { + // this means that all requirements were dropped (likely due to unknown operators), so we are checking the broader + // unrestricted action. + return nil, getFieldSelectorErr + } + return retRequirements, getFieldSelectorErr +} + +func labelSelectorToAuthorizationAPI(attr authorizer.Attributes) ([]metav1.LabelSelectorRequirement, error) { + requirements, getLabelSelectorErr := attr.GetLabelSelector() + if len(requirements) == 0 { + return nil, getLabelSelectorErr + } + + retRequirements := []metav1.LabelSelectorRequirement{} + for _, requirement := range requirements { + retRequirement := metav1.LabelSelectorRequirement{ + Key: requirement.Key(), + } + if values := requirement.ValuesUnsorted(); len(values) > 0 { + retRequirement.Values = values + } + switch requirement.Operator() { + case selection.Equals, selection.DoubleEquals, selection.In: + retRequirement.Operator = metav1.LabelSelectorOpIn + case selection.NotEquals, selection.NotIn: + retRequirement.Operator = metav1.LabelSelectorOpNotIn + case selection.Exists: + retRequirement.Operator = metav1.LabelSelectorOpExists + case selection.DoesNotExist: + retRequirement.Operator = metav1.LabelSelectorOpDoesNotExist + default: + // ignore this particular requirement. since requirements are AND'd, it is safe to ignore unknown requirements + // for authorization since the resulting check will only be as broad or broader than the intended. + continue + } + retRequirements = append(retRequirements, retRequirement) + } + + if len(retRequirements) == 0 { + // this means that all requirements were dropped (likely due to unknown operators), so we are checking the broader + // unrestricted action. 
+ return nil, getLabelSelectorErr + } + return retRequirements, getLabelSelectorErr +} + // TODO: need to finish the method to get the rules when using webhook mode func (w *WebhookAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { var ( @@ -474,13 +571,15 @@ func v1ResourceAttributesToV1beta1ResourceAttributes(in *authorizationv1.Resourc return nil } return &authorizationv1beta1.ResourceAttributes{ - Namespace: in.Namespace, - Verb: in.Verb, - Group: in.Group, - Version: in.Version, - Resource: in.Resource, - Subresource: in.Subresource, - Name: in.Name, + Namespace: in.Namespace, + Verb: in.Verb, + Group: in.Group, + Version: in.Version, + Resource: in.Resource, + Subresource: in.Subresource, + Name: in.Name, + FieldSelector: in.FieldSelector, + LabelSelector: in.LabelSelector, } } diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go index d43b0c25e0..adc496feb8 100644 --- a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go @@ -21,29 +21,40 @@ import ( ) // NewSimpleFakeResourceFinder builds a super simple ResourceFinder that just iterates over the objects you provided -func NewSimpleFakeResourceFinder(infos ...*resource.Info) ResourceFinder { - return &fakeResourceFinder{ +func NewSimpleFakeResourceFinder(infos ...*resource.Info) *FakeResourceFinder { + return &FakeResourceFinder{ Infos: infos, } } -type fakeResourceFinder struct { +func (f *FakeResourceFinder) WithError(err error) *FakeResourceFinder { + f.err = err + return f +} + +type FakeResourceFinder struct { Infos []*resource.Info + err error } // Do implements the interface -func (f *fakeResourceFinder) Do() resource.Visitor { +func (f *FakeResourceFinder) Do() resource.Visitor { return &fakeResourceResult{ Infos: f.Infos, + err: f.err, } } type fakeResourceResult struct { Infos []*resource.Info + err error } // Visit just iterates over info func (r *fakeResourceResult) Visit(fn resource.VisitorFunc) error { + if r.err != nil { + return r.err + } for _, info := range r.Infos { err := fn(info, nil) if err != nil { diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go index a5f307de56..e3cee34733 100644 --- a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go @@ -21,9 +21,9 @@ import ( "path/filepath" "strings" - jsonpatch "github.com/evanphx/json-patch" "github.com/spf13/cobra" "github.com/spf13/pflag" + jsonpatch "gopkg.in/evanphx/json-patch.v4" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" diff --git a/vendor/k8s.io/client-go/applyconfigurations/OWNERS b/vendor/k8s.io/client-go/applyconfigurations/OWNERS new file mode 100644 index 0000000000..ea0928429d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse + - jpbetz diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go index 64422c1df4..0d50d44ac2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go +++ 
b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use // with apply. type AuditAnnotationApplyConfiguration struct { Key *string `json:"key,omitempty"` ValueExpression *string `json:"valueExpression,omitempty"` } -// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with // apply. func AuditAnnotation() *AuditAnnotationApplyConfiguration { return &AuditAnnotationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go index 38b7475cc4..1f890bcfcb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use // with apply. type ExpressionWarningApplyConfiguration struct { FieldRef *string `json:"fieldRef,omitempty"` Warning *string `json:"warning,omitempty"` } -// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with // apply. func ExpressionWarning() *ExpressionWarningApplyConfiguration { return &ExpressionWarningApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go index ea1dc377b9..d8a816f1e2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use // with apply. type MatchConditionApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with // apply. 
func MatchCondition() *MatchConditionApplyConfiguration { return &MatchConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go index d8e9828947..e8e371d7dd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use // with apply. type MatchResourcesApplyConfiguration struct { NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` @@ -33,7 +33,7 @@ type MatchResourcesApplyConfiguration struct { MatchPolicy *apiadmissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"` } -// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with // apply. func MatchResources() *MatchResourcesApplyConfiguration { return &MatchResourcesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go index faff51a041..cd8096f902 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookApplyConfiguration represents an declarative configuration of the MutatingWebhook type for use +// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use // with apply. type MutatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -40,7 +40,7 @@ type MutatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with +// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with // apply. 
func MutatingWebhook() *MutatingWebhookApplyConfiguration { return &MutatingWebhookApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go index 61c8f667d2..58b71d6d58 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the MutatingWebhookConfiguration type for use +// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use // with apply. type MutatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type MutatingWebhookConfigurationApplyConfiguration struct { Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// MutatingWebhookConfiguration constructs an declarative configuration of the MutatingWebhookConfiguration type for use with +// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with // apply. func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationApplyConfiguration { b := &MutatingWebhookConfigurationApplyConfiguration{} @@ -250,3 +250,9 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ... } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go index be8d5206cb..eda3bf635a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go @@ -22,14 +22,14 @@ import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" ) -// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use // with apply. type NamedRuleWithOperationsApplyConfiguration struct { ResourceNames []string `json:"resourceNames,omitempty"` RuleWithOperationsApplyConfiguration `json:",inline"` } -// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with // apply. 
func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { return &NamedRuleWithOperationsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go index b77a30cf91..07577929ab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use // with apply. type ParamKindApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` Kind *string `json:"kind,omitempty"` } -// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with // apply. func ParamKind() *ParamKindApplyConfiguration { return &ParamKindApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go index b52becda5e..73cda9b04d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use // with apply. type ParamRefApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ParamRefApplyConfiguration struct { ParameterNotFoundAction *admissionregistrationv1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` } -// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with // apply. func ParamRef() *ParamRefApplyConfiguration { return &ParamRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go index 41d4179df4..36a93643c1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/admissionregistration/v1" ) -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use +// RuleApplyConfiguration represents a declarative configuration of the Rule type for use // with apply. type RuleApplyConfiguration struct { APIGroups []string `json:"apiGroups,omitempty"` @@ -31,7 +31,7 @@ type RuleApplyConfiguration struct { Scope *v1.ScopeType `json:"scope,omitempty"` } -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with +// RuleApplyConfiguration constructs a declarative configuration of the Rule type for use with // apply. 
func Rule() *RuleApplyConfiguration { return &RuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go index 59bbb8fe3d..92bddd502a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/admissionregistration/v1" ) -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use +// RuleWithOperationsApplyConfiguration represents a declarative configuration of the RuleWithOperations type for use // with apply. type RuleWithOperationsApplyConfiguration struct { Operations []v1.OperationType `json:"operations,omitempty"` RuleApplyConfiguration `json:",inline"` } -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with +// RuleWithOperationsApplyConfiguration constructs a declarative configuration of the RuleWithOperations type for use with // apply. func RuleWithOperations() *RuleWithOperationsApplyConfiguration { return &RuleWithOperationsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go index 2cd55d9ea2..239780664d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go index 8621ce71ec..723d10ecf5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use // with apply. 
type TypeCheckingApplyConfiguration struct { ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` } -// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with // apply. func TypeChecking() *TypeCheckingApplyConfiguration { return &TypeCheckingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go index fc96a8bdc6..841209cae1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use // with apply. type ValidatingAdmissionPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ValidatingAdmissionPolicyApplyConfiguration struct { Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } -// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with // apply. func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { b := &ValidatingAdmissionPolicyApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go index 5bc41a0f52..1acad056f3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use // with apply. 
type ValidatingAdmissionPolicyBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingAdmissionPolicyBindingApplyConfiguration struct { Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` } -// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with // apply. func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go index da6ecbe371..eb426af42a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" ) -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use // with apply. type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { PolicyName *string `json:"policyName,omitempty"` @@ -31,7 +31,7 @@ type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { ValidationActions []admissionregistrationv1.ValidationAction `json:"validationActions,omitempty"` } -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with // apply. 
func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go index eb930b9b1c..1635b30a61 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" ) -// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use // with apply. type ValidatingAdmissionPolicySpecApplyConfiguration struct { ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` @@ -34,7 +34,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct { Variables []VariableApplyConfiguration `json:"variables,omitempty"` } -// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with // apply. func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { return &ValidatingAdmissionPolicySpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go index 25cd67f08d..e6f4e84591 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use // with apply. type ValidatingAdmissionPolicyStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -30,7 +30,7 @@ type ValidatingAdmissionPolicyStatusApplyConfiguration struct { Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with // apply. 
func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { return &ValidatingAdmissionPolicyStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go index 613856bac7..a2c705eb5c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookApplyConfiguration represents an declarative configuration of the ValidatingWebhook type for use +// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use // with apply. type ValidatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -39,7 +39,7 @@ type ValidatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with +// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with // apply. func ValidatingWebhook() *ValidatingWebhookApplyConfiguration { return &ValidatingWebhookApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go index 811bfdf0b6..0d1a6c81ae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the ValidatingWebhookConfiguration type for use +// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use // with apply. type ValidatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingWebhookConfigurationApplyConfiguration struct { Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// ValidatingWebhookConfiguration constructs an declarative configuration of the ValidatingWebhookConfiguration type for use with +// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with // apply. func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfigurationApplyConfiguration { b := &ValidatingWebhookConfigurationApplyConfiguration{} @@ -250,3 +250,9 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values . } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go index ac29d14362..2a828b6b4f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use // with apply. type ValidationApplyConfiguration struct { Expression *string `json:"expression,omitempty"` @@ -31,7 +31,7 @@ type ValidationApplyConfiguration struct { MessageExpression *string `json:"messageExpression,omitempty"` } -// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with // apply. func Validation() *ValidationApplyConfiguration { return &ValidationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go index d55f29a38b..9dd20afa72 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// VariableApplyConfiguration represents an declarative configuration of the Variable type for use +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use // with apply. type VariableApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with // apply. func Variable() *VariableApplyConfiguration { return &VariableApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go index aa358ae205..77f2227b95 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. 
type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go index 023695139d..958a537406 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use // with apply. type AuditAnnotationApplyConfiguration struct { Key *string `json:"key,omitempty"` ValueExpression *string `json:"valueExpression,omitempty"` } -// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with // apply. func AuditAnnotation() *AuditAnnotationApplyConfiguration { return &AuditAnnotationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go index f8b511f512..f36c2f0f5c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use // with apply. type ExpressionWarningApplyConfiguration struct { FieldRef *string `json:"fieldRef,omitempty"` Warning *string `json:"warning,omitempty"` } -// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with // apply. 
func ExpressionWarning() *ExpressionWarningApplyConfiguration { return &ExpressionWarningApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go index 186c750f96..7f983dcb22 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use // with apply. type MatchConditionApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with // apply. func MatchCondition() *MatchConditionApplyConfiguration { return &MatchConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go index a6710ac7ed..e443535b6a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use // with apply. type MatchResourcesApplyConfiguration struct { NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` @@ -33,7 +33,7 @@ type MatchResourcesApplyConfiguration struct { MatchPolicy *admissionregistrationv1alpha1.MatchPolicyType `json:"matchPolicy,omitempty"` } -// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with // apply. 
func MatchResources() *MatchResourcesApplyConfiguration { return &MatchResourcesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go index bb2a7ba890..5e6744fd74 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go @@ -23,14 +23,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" ) -// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use // with apply. type NamedRuleWithOperationsApplyConfiguration struct { ResourceNames []string `json:"resourceNames,omitempty"` v1.RuleWithOperationsApplyConfiguration `json:",inline"` } -// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with // apply. func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { return &NamedRuleWithOperationsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go index 350993cea0..daf17fb249 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use // with apply. type ParamKindApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` Kind *string `json:"kind,omitempty"` } -// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with // apply. func ParamKind() *ParamKindApplyConfiguration { return &ParamKindApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go index 0951cae8a9..c4fff1d475 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use // with apply. 
type ParamRefApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ParamRefApplyConfiguration struct { ParameterNotFoundAction *v1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` } -// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with // apply. func ParamRef() *ParamRefApplyConfiguration { return &ParamRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go index 42a9170710..d1a7fff50e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go @@ -18,13 +18,13 @@ limitations under the License. package v1alpha1 -// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use // with apply. type TypeCheckingApplyConfiguration struct { ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` } -// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with // apply. func TypeChecking() *TypeCheckingApplyConfiguration { return &TypeCheckingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go index c860b85cf7..fe60eb5f25 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use // with apply. type ValidatingAdmissionPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ValidatingAdmissionPolicyApplyConfiguration struct { Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } -// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with // apply. func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { b := &ValidatingAdmissionPolicyApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index dc08226404..0c11ee5945 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use // with apply. type ValidatingAdmissionPolicyBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingAdmissionPolicyBindingApplyConfiguration struct { Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` } -// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with // apply. func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go index c9a4ff7ab4..0f8e4e4357 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" ) -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use // with apply. 
type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { PolicyName *string `json:"policyName,omitempty"` @@ -31,7 +31,7 @@ type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { ValidationActions []admissionregistrationv1alpha1.ValidationAction `json:"validationActions,omitempty"` } -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with // apply. func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go index 7ee320e428..d5d3529949 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" ) -// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use // with apply. type ValidatingAdmissionPolicySpecApplyConfiguration struct { ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` @@ -34,7 +34,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct { Variables []VariableApplyConfiguration `json:"variables,omitempty"` } -// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with // apply. func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { return &ValidatingAdmissionPolicySpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go index 821184c8a8..2fec5ba477 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use // with apply. 
type ValidatingAdmissionPolicyStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -30,7 +30,7 @@ type ValidatingAdmissionPolicyStatusApplyConfiguration struct { Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with // apply. func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { return &ValidatingAdmissionPolicyStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go index 9a5fc8475a..5f73043734 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use // with apply. type ValidationApplyConfiguration struct { Expression *string `json:"expression,omitempty"` @@ -31,7 +31,7 @@ type ValidationApplyConfiguration struct { MessageExpression *string `json:"messageExpression,omitempty"` } -// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with // apply. func Validation() *ValidationApplyConfiguration { return &ValidationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go index 2c70a8cfb5..0459dae655 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// VariableApplyConfiguration represents an declarative configuration of the Variable type for use +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use // with apply. type VariableApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with // apply. 
func Variable() *VariableApplyConfiguration { return &VariableApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go index e92fba0ddb..8718db9447 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use // with apply. type AuditAnnotationApplyConfiguration struct { Key *string `json:"key,omitempty"` ValueExpression *string `json:"valueExpression,omitempty"` } -// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with // apply. func AuditAnnotation() *AuditAnnotationApplyConfiguration { return &AuditAnnotationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go index 059c1b94ba..66cfc8cdc7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use // with apply. type ExpressionWarningApplyConfiguration struct { FieldRef *string `json:"fieldRef,omitempty"` Warning *string `json:"warning,omitempty"` } -// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with // apply. func ExpressionWarning() *ExpressionWarningApplyConfiguration { return &ExpressionWarningApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go index d099b6b6ea..63db7fc801 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use // with apply. 
type MatchConditionApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with // apply. func MatchCondition() *MatchConditionApplyConfiguration { return &MatchConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go index 25d4139db6..4005e55a33 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use // with apply. type MatchResourcesApplyConfiguration struct { NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` @@ -33,7 +33,7 @@ type MatchResourcesApplyConfiguration struct { MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"` } -// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with // apply. func MatchResources() *MatchResourcesApplyConfiguration { return &MatchResourcesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go index 54845341f4..b2ab76aefd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookApplyConfiguration represents an declarative configuration of the MutatingWebhook type for use +// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use // with apply. type MutatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -41,7 +41,7 @@ type MutatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with +// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with // apply. 
func MutatingWebhook() *MutatingWebhookApplyConfiguration { return &MutatingWebhookApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 10dd034e25..51bb823896 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the MutatingWebhookConfiguration type for use +// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use // with apply. type MutatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type MutatingWebhookConfigurationApplyConfiguration struct { Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// MutatingWebhookConfiguration constructs an declarative configuration of the MutatingWebhookConfiguration type for use with +// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with // apply. func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationApplyConfiguration { b := &MutatingWebhookConfigurationApplyConfiguration{} @@ -250,3 +250,9 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ... } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go index fa346c4a57..5de70c7ad3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go @@ -23,14 +23,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" ) -// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use // with apply. type NamedRuleWithOperationsApplyConfiguration struct { ResourceNames []string `json:"resourceNames,omitempty"` v1.RuleWithOperationsApplyConfiguration `json:",inline"` } -// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with // apply. 
func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { return &NamedRuleWithOperationsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go index 6050e60251..3983125281 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use // with apply. type ParamKindApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` Kind *string `json:"kind,omitempty"` } -// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with // apply. func ParamKind() *ParamKindApplyConfiguration { return &ParamKindApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go index 2be98dbc52..0a94ae0673 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use // with apply. type ParamRefApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ParamRefApplyConfiguration struct { ParameterNotFoundAction *v1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` } -// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with // apply. func ParamRef() *ParamRefApplyConfiguration { return &ParamRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go index c21b574908..70cc6b5b27 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. 
type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go index 07baf334cd..cea6e11dee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use // with apply. type TypeCheckingApplyConfiguration struct { ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` } -// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with // apply. func TypeChecking() *TypeCheckingApplyConfiguration { return &TypeCheckingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go index e144bc9f70..c29ee56cbe 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use // with apply. type ValidatingAdmissionPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ValidatingAdmissionPolicyApplyConfiguration struct { Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } -// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with // apply. func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { b := &ValidatingAdmissionPolicyApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index 0dc06aedec..4347c4810c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use // with apply. type ValidatingAdmissionPolicyBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingAdmissionPolicyBindingApplyConfiguration struct { Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` } -// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with // apply. func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go index d20a78efff..bddc3a40c7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" ) -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use // with apply. 
type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { PolicyName *string `json:"policyName,omitempty"` @@ -31,7 +31,7 @@ type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { ValidationActions []admissionregistrationv1beta1.ValidationAction `json:"validationActions,omitempty"` } -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with // apply. func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go index c6e9389103..8b235337d7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" ) -// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use // with apply. type ValidatingAdmissionPolicySpecApplyConfiguration struct { ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` @@ -34,7 +34,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct { Variables []VariableApplyConfiguration `json:"variables,omitempty"` } -// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with // apply. func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { return &ValidatingAdmissionPolicySpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go index e3e6d417ed..4612af0cff 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use // with apply. 
type ValidatingAdmissionPolicyStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -30,7 +30,7 @@ type ValidatingAdmissionPolicyStatusApplyConfiguration struct { Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with // apply. func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { return &ValidatingAdmissionPolicyStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go index 8c5c341bad..1e107d68f7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookApplyConfiguration represents an declarative configuration of the ValidatingWebhook type for use +// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use // with apply. type ValidatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -40,7 +40,7 @@ type ValidatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with +// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with // apply. func ValidatingWebhook() *ValidatingWebhookApplyConfiguration { return &ValidatingWebhookApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 75f1b9d716..c3535c180c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the ValidatingWebhookConfiguration type for use +// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use // with apply. type ValidatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingWebhookConfigurationApplyConfiguration struct { Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// ValidatingWebhookConfiguration constructs an declarative configuration of the ValidatingWebhookConfiguration type for use with +// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with // apply. 
func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfigurationApplyConfiguration { b := &ValidatingWebhookConfigurationApplyConfiguration{} @@ -250,3 +250,9 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values . } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go index ed9ff1ac0c..019e8e7aa9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use // with apply. type ValidationApplyConfiguration struct { Expression *string `json:"expression,omitempty"` @@ -31,7 +31,7 @@ type ValidationApplyConfiguration struct { MessageExpression *string `json:"messageExpression,omitempty"` } -// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with // apply. func Validation() *ValidationApplyConfiguration { return &ValidationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go index 0fc294c65d..0ece197db2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// VariableApplyConfiguration represents an declarative configuration of the Variable type for use +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use // with apply. type VariableApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with // apply. func Variable() *VariableApplyConfiguration { return &VariableApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go index 490f9d5f3f..76ff71b4ae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go index 81c56330bb..8394298b93 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// ServerStorageVersionApplyConfiguration represents an declarative configuration of the ServerStorageVersion type for use +// ServerStorageVersionApplyConfiguration represents a declarative configuration of the ServerStorageVersion type for use // with apply. type ServerStorageVersionApplyConfiguration struct { APIServerID *string `json:"apiServerID,omitempty"` @@ -27,7 +27,7 @@ type ServerStorageVersionApplyConfiguration struct { ServedVersions []string `json:"servedVersions,omitempty"` } -// ServerStorageVersionApplyConfiguration constructs an declarative configuration of the ServerStorageVersion type for use with +// ServerStorageVersionApplyConfiguration constructs a declarative configuration of the ServerStorageVersion type for use with // apply. func ServerStorageVersion() *ServerStorageVersionApplyConfiguration { return &ServerStorageVersionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go index 6b9f178390..d734328b06 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageVersionApplyConfiguration represents an declarative configuration of the StorageVersion type for use +// StorageVersionApplyConfiguration represents a declarative configuration of the StorageVersion type for use // with apply. type StorageVersionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StorageVersionApplyConfiguration struct { Status *StorageVersionStatusApplyConfiguration `json:"status,omitempty"` } -// StorageVersion constructs an declarative configuration of the StorageVersion type for use with +// StorageVersion constructs a declarative configuration of the StorageVersion type for use with // apply. 
func StorageVersion(name string) *StorageVersionApplyConfiguration { b := &StorageVersionApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *StorageVersionApplyConfiguration) WithStatus(value *StorageVersionStatu b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageVersionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go index 75b6256478..68d894d0c7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StorageVersionConditionApplyConfiguration represents an declarative configuration of the StorageVersionCondition type for use +// StorageVersionConditionApplyConfiguration represents a declarative configuration of the StorageVersionCondition type for use // with apply. type StorageVersionConditionApplyConfiguration struct { Type *v1alpha1.StorageVersionConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StorageVersionConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StorageVersionConditionApplyConfiguration constructs an declarative configuration of the StorageVersionCondition type for use with +// StorageVersionConditionApplyConfiguration constructs a declarative configuration of the StorageVersionCondition type for use with // apply. func StorageVersionCondition() *StorageVersionConditionApplyConfiguration { return &StorageVersionConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go index 43b0bf71b1..2e25d67524 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// StorageVersionStatusApplyConfiguration represents an declarative configuration of the StorageVersionStatus type for use +// StorageVersionStatusApplyConfiguration represents a declarative configuration of the StorageVersionStatus type for use // with apply. type StorageVersionStatusApplyConfiguration struct { StorageVersions []ServerStorageVersionApplyConfiguration `json:"storageVersions,omitempty"` @@ -26,7 +26,7 @@ type StorageVersionStatusApplyConfiguration struct { Conditions []StorageVersionConditionApplyConfiguration `json:"conditions,omitempty"` } -// StorageVersionStatusApplyConfiguration constructs an declarative configuration of the StorageVersionStatus type for use with +// StorageVersionStatusApplyConfiguration constructs a declarative configuration of the StorageVersionStatus type for use with // apply. 
func StorageVersionStatus() *StorageVersionStatusApplyConfiguration { return &StorageVersionStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go index c4e2085078..25b6450591 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go index cc9fdcd5dd..a157856514 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. type DaemonSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct { Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go index 283ae10a29..de91745b83 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { Type *v1.DaemonSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go index 5e808874b7..99dc5abae8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go index d1c4462aa9..a40dc16512 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. 
type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go index f1ba18226f..15af4e66be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { Type *v1.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go index 13edda7727..52b7a21b71 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go index 7747044136..84df752bc1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { Type *v1.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go index 812253dae8..063f1c2765 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -36,7 +36,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go index 7b48b42557..747813ade8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. 
type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go index e9571edab1..dc4b97c55a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { Type *v1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go index 4e7818e535..35ca4e4dff 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct { Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go index 19b0355d15..32da80842c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { Type *v1.ReplicaSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go index ca32865835..0390584867 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go index 12f41490f9..a1408ae25e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. 
type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go index ebe8e86d1f..e898f5081c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go index ca9daaf249..2bc2937241 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. 
func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go index c1b5dea855..dd0de81a6c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { Partition *int32 `json:"partition,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go index 24041d99f8..6f2b340dab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. type StatefulSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct { Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go index f9d47850d6..c62a5e854c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { Type *v1.StatefulSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go index 9778f1c4a0..86f39e16c1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use // with apply. type StatefulSetOrdinalsApplyConfiguration struct { Start *int32 `json:"start,omitempty"` } -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with // apply. func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { return &StatefulSetOrdinalsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go index ba01d5d3c1..cd65fd4364 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. 
type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { WhenDeleted *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` WhenScaled *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go index 81afdca596..1848a963cc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct { Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go index d88881b656..637a1c649d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. 
func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go index 5268a1e065..b59e107355 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. type StatefulSetUpdateStrategyApplyConfiguration struct { Type *v1.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go index 827c063598..606de58a1e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go index e22f76b665..145aaed70d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go index 9da8ce0899..504dddd94e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { Type *v1beta1.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. 
func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go index 5e18476bdc..5531c756f9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -37,7 +37,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go index f8d1cf5d25..adc023a34d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go index 7279318a88..2c322b4ace 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. 
type DeploymentStrategyApplyConfiguration struct { Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go index 131e57a39d..775f82eef8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// RollbackConfigApplyConfiguration represents an declarative configuration of the RollbackConfig type for use +// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use // with apply. type RollbackConfigApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// RollbackConfigApplyConfiguration constructs an declarative configuration of the RollbackConfig type for use with +// RollbackConfigApplyConfiguration constructs a declarative configuration of the RollbackConfig type for use with // apply. func RollbackConfig() *RollbackConfigApplyConfiguration { return &RollbackConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go index dde5f064b0..244701a5e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. 
func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go index 8989a08d2c..94c2971343 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { Partition *int32 `json:"partition,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go index ed5cfab41c..2705938862 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. type StatefulSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct { Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go index 97e994ab71..8a17391cd2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { Type *v1beta1.StatefulSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go index 8f349a2d27..2e3049e5e2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use // with apply. type StatefulSetOrdinalsApplyConfiguration struct { Start *int32 `json:"start,omitempty"` } -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with // apply. 
func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { return &StatefulSetOrdinalsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go index 0048724c04..69a8ee0f0b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { WhenDeleted *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` WhenScaled *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go index 1eb1ba7b03..ac325d717e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct { Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. 
func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go index f31066b6ff..27ae7540fd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go index 895c1e7f8a..7714ebbb72 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. type StatefulSetUpdateStrategyApplyConfiguration struct { Type *v1beta1.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go index 4abab6851c..5f75a45510 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. 
type ControllerRevisionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go index 906a8ca46e..9ffda6182e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. type DaemonSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct { Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go index 55dc1f4877..8315050f0f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. 
type DaemonSetConditionApplyConfiguration struct { Type *v1beta2.DaemonSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go index 48137819af..74d8bf51c6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go index 29cda7a90e..6b0fda8953 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. 
func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go index 07fc07fc6a..7d66f1da43 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { Type *v1beta2.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go index 7e39e67510..485da788af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go index 852a2c6832..1924278741 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { Type *v1beta2.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go index 6898941ace..1b55130c6b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -36,7 +36,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go index fe99ca9917..5fa9122332 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. 
type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go index 8714e153e4..c769436ee0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { Type *v1beta2.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go index d9303e1b22..d8608aa51c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct { Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go index 47776bfa2e..beec546f7c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { Type *v1beta2.ReplicaSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go index 14d548169e..1d77b9e0fd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go index 7c1b8fb29d..d3c92e274d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. 
type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go index b586b678d4..ad6021d37a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go index 78ef210081..b0cc3a4ee4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. 
func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go index 4a12e51c0a..0046c264bb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { Partition *int32 `json:"partition,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go index 0e89668cb3..126ab2d8bd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -34,7 +34,7 @@ type ScaleApplyConfiguration struct { Status *v1beta2.ScaleStatus `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { b := &ScaleApplyConfiguration{} @@ -216,3 +216,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta2.ScaleStatus) *ScaleAp b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go index 03d5428b4b..3d2b5d1917 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. type StatefulSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct { Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go index c33e68b5e2..aa45db686e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { Type *v1beta2.StatefulSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go index c586da775c..a899243a5a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1beta2 -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use // with apply. type StatefulSetOrdinalsApplyConfiguration struct { Start *int32 `json:"start,omitempty"` } -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with // apply. func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { return &StatefulSetOrdinalsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go index aee27803d3..318e5f4642 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { WhenDeleted *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` WhenScaled *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go index b6165fbd9a..bebf80c896 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. 
type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct { Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go index 63835904c1..a647cd7d26 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go index 03c2914917..81d4ba1df3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. type StatefulSetUpdateStrategyApplyConfiguration struct { Type *v1beta2.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. 
func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go index 0eac22692c..51ec665012 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go index 38fa205841..8150635ee6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go index 561ac60d35..0ca2f84ea9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -27,7 +27,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go index abc2e05aa7..fcb231c3be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -32,7 +32,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. 
func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go index f770922803..40f3db8c5a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -33,7 +33,7 @@ type ScaleApplyConfiguration struct { Status *ScaleStatusApplyConfiguration `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { b := &ScaleApplyConfiguration{} @@ -215,3 +215,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value *ScaleStatusApplyConfiguratio b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go index 2339a8fef2..025004ba5f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ScaleSpecApplyConfiguration represents an declarative configuration of the ScaleSpec type for use +// ScaleSpecApplyConfiguration represents a declarative configuration of the ScaleSpec type for use // with apply. type ScaleSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` } -// ScaleSpecApplyConfiguration constructs an declarative configuration of the ScaleSpec type for use with +// ScaleSpecApplyConfiguration constructs a declarative configuration of the ScaleSpec type for use with // apply. func ScaleSpec() *ScaleSpecApplyConfiguration { return &ScaleSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go index 81c8d1b30a..51f96d2357 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ScaleStatusApplyConfiguration represents an declarative configuration of the ScaleStatus type for use +// ScaleStatusApplyConfiguration represents a declarative configuration of the ScaleStatus type for use // with apply. 
type ScaleStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` Selector *string `json:"selector,omitempty"` } -// ScaleStatusApplyConfiguration constructs an declarative configuration of the ScaleStatus type for use with +// ScaleStatusApplyConfiguration constructs a declarative configuration of the ScaleStatus type for use with // apply. func ScaleStatus() *ScaleStatusApplyConfiguration { return &ScaleStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go index 15ef216d1b..b6e071e848 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use // with apply. type ContainerResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with // apply. func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { return &ContainerResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go index 34213bca3f..46bd2bac20 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use // with apply. type ContainerResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with // apply. 
func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go index 19045706dc..645f098577 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go index 11a8eff263..a9c45b31a0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go index 3b1a0329b8..4280086f5e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. 
package v2 -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go index 31061de85e..e26b530c18 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go index e6fdabd7c8..05750cc21d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// HorizontalPodAutoscalerBehaviorApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerBehavior type for use +// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use // with apply. 
type HorizontalPodAutoscalerBehaviorApplyConfiguration struct { ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"` ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"` } -// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerBehavior type for use with +// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerBehavior type for use with // apply. func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration { return &HorizontalPodAutoscalerBehaviorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go index c020eccd3d..844c6dc862 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { Type *v2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go index c36bc3f225..e34ababc58 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. 
type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -28,7 +28,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go index d4d551df85..f1a2c3f4e9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go index 139f0fb5c7..b8b735747b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use +// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use // with apply. type HPAScalingPolicyApplyConfiguration struct { Type *v2.HPAScalingPolicyType `json:"type,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingPolicyApplyConfiguration struct { PeriodSeconds *int32 `json:"periodSeconds,omitempty"` } -// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with +// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with // apply. 
func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { return &HPAScalingPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go index e768076aa4..c7020f77bd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use +// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use // with apply. type HPAScalingRulesApplyConfiguration struct { StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingRulesApplyConfiguration struct { Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` } -// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with +// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with // apply. func HPAScalingRules() *HPAScalingRulesApplyConfiguration { return &HPAScalingRulesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go index 312ad3ddd6..2f99f7d0b4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use +// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use // with apply. type MetricIdentifierApplyConfiguration struct { Name *string `json:"name,omitempty"` Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with +// MetricIdentifierApplyConfiguration constructs a declarative configuration of the MetricIdentifier type for use with // apply. func MetricIdentifier() *MetricIdentifierApplyConfiguration { return &MetricIdentifierApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go index 094ead6c16..89e6b5c68b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. 
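The HPAScalingRules and HPAScalingPolicy builders in the hunks above compose into the spec's Behavior field. A hedged fragment, reusing the import aliases from the sketch earlier and with illustrative values, could look like this; it would be attached with WithBehavior(buildBehavior()) on the spec builder.

// buildBehavior shows how HPAScalingRules and HPAScalingPolicy apply
// configurations nest under HorizontalPodAutoscalerBehavior to slow
// scale-down to at most two pods per minute after a five-minute window.
func buildBehavior() *applyautoscalingv2.HorizontalPodAutoscalerBehaviorApplyConfiguration {
	return applyautoscalingv2.HorizontalPodAutoscalerBehavior().
		WithScaleDown(applyautoscalingv2.HPAScalingRules().
			WithStabilizationWindowSeconds(300).
			WithPolicies(applyautoscalingv2.HPAScalingPolicy().
				WithType(autoscalingv2.PodsScalingPolicy).
				WithValue(2).
				WithPeriodSeconds(60)))
}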
type MetricSpecApplyConfiguration struct { Type *v2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go index c65ad446f0..86ae3348b6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { Type *v2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go index f301e4d2be..bf68a1c346 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use +// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use // with apply. type MetricTargetApplyConfiguration struct { Type *v2.MetricTargetType `json:"type,omitempty"` @@ -32,7 +32,7 @@ type MetricTargetApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with +// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with // apply. 
func MetricTarget() *MetricTargetApplyConfiguration { return &MetricTargetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go index e8474b1890..59732548b8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use +// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use // with apply. type MetricValueStatusApplyConfiguration struct { Value *resource.Quantity `json:"value,omitempty"` @@ -30,7 +30,7 @@ type MetricValueStatusApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with +// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with // apply. func MetricValueStatus() *MetricValueStatusApplyConfiguration { return &MetricValueStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go index a9482565e0..2391fa5c22 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. type ObjectMetricSourceApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go index 70ba43bedd..9ffd0c180d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. 
type ObjectMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricStatusApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go index 0a7a5c2595..28a35a2ae1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use // with apply. type PodsMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with // apply. func PodsMetricSource() *PodsMetricSourceApplyConfiguration { return &PodsMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go index 865fcc33e3..4614282ce1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use // with apply. type PodsMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with // apply. 
func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { return &PodsMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go index 25a065fef6..ffc9042b9f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use // with apply. type ResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with // apply. func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { return &ResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go index fb5625afab..0fdbfcb555 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use // with apply. type ResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with // apply. func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { return &ResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go index 2594e8e072..f41c5af10f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use // with apply. 
type ContainerResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with // apply. func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { return &ContainerResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go index ae897237c4..4cd56eea37 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use // with apply. type ContainerResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with // apply. func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go index fe3d15e866..f03261612e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta1 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. 
func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go index c118e6ca1e..8dce4529dd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -32,7 +32,7 @@ type ExternalMetricSourceApplyConfiguration struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go index ab771214e2..4034d7e55c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -32,7 +32,7 @@ type ExternalMetricStatusApplyConfiguration struct { CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. 
func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go index 66b8d5f738..93e37eaffa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go index de3e6ea5cd..8bb82298d1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { Type *v2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. 
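Besides the article fix in the doc comments, the hunk above adds a GetName getter to the v2beta1 HorizontalPodAutoscaler apply configuration (the same getter is added for v2beta2 further down). A small illustrative use, assuming the alias applyautoscalingv2beta1 = "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" and the standard library log package:

// logTarget is an illustrative helper: with the new GetName getter a generic
// wrapper can report which object an apply configuration targets without
// reaching into ObjectMeta by hand. GetName returns *string and is nil until
// a name has been set on the configuration.
func logTarget(hpa *applyautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration) {
	if name := hpa.GetName(); name != nil {
		log.Printf("applying HorizontalPodAutoscaler %q", *name)
	}
}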
func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go index 761d94a850..6f111ceafd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta1 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -27,7 +27,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go index 95ec5be43b..391b577258 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. 
func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go index 70beec84e0..961e2c5b48 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go @@ -22,7 +22,7 @@ import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. type MetricSpecApplyConfiguration struct { Type *v2beta1.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go index b03ea2f9e4..587b5a1f88 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go @@ -22,7 +22,7 @@ import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { Type *v2beta1.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go index 07d467972e..a9e2eead4d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. 
type ObjectMetricSourceApplyConfiguration struct { Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"` @@ -33,7 +33,7 @@ type ObjectMetricSourceApplyConfiguration struct { AverageValue *resource.Quantity `json:"averageValue,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go index b5e0d3e3d2..4d3be8df6c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. type ObjectMetricStatusApplyConfiguration struct { Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"` @@ -33,7 +33,7 @@ type ObjectMetricStatusApplyConfiguration struct { AverageValue *resource.Quantity `json:"averageValue,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go index a4122b8989..cfcd752e24 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use // with apply. type PodsMetricSourceApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -31,7 +31,7 @@ type PodsMetricSourceApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with // apply. 
func PodsMetricSource() *PodsMetricSourceApplyConfiguration { return &PodsMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go index d6172011b7..f7a7777fd4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use // with apply. type PodsMetricStatusApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -31,7 +31,7 @@ type PodsMetricStatusApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with // apply. func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { return &PodsMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go index 804f3f4926..ad97d83c3c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use // with apply. type ResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -31,7 +31,7 @@ type ResourceMetricSourceApplyConfiguration struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"` } -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with // apply. 
func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { return &ResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go index 5fdc29c132..78fbeaad06 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use // with apply. type ResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -31,7 +31,7 @@ type ResourceMetricStatusApplyConfiguration struct { CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"` } -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with // apply. func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { return &ResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go index aa334744ea..1050165ea3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use // with apply. type ContainerResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with // apply. 
func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { return &ContainerResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go index bf0822a066..708f68bc6b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use // with apply. type ContainerResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with // apply. func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go index 2903629bc8..c281084b16 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. 
func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go index 80053a6b33..d34ca11494 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go index 71ac35adbc..be29e607fa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. 
func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go index 1c750cb164..ce666f0f3e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go index ec41bfadea..e9b1a9fb9e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// HorizontalPodAutoscalerBehaviorApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerBehavior type for use +// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use // with apply. type HorizontalPodAutoscalerBehaviorApplyConfiguration struct { ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"` ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"` } -// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerBehavior type for use with +// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerBehavior type for use with // apply. 
func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration { return &HorizontalPodAutoscalerBehaviorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go index 0f0cae75d3..a73e7ebaa8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { Type *v2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go index c60adee581..9629e4bd59 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -28,7 +28,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. 
func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go index 881a874e51..1eee645050 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go index 2a535891af..b799f99e0d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use +// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use // with apply. type HPAScalingPolicyApplyConfiguration struct { Type *v2beta2.HPAScalingPolicyType `json:"type,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingPolicyApplyConfiguration struct { PeriodSeconds *int32 `json:"periodSeconds,omitempty"` } -// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with +// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with // apply. 
func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { return &HPAScalingPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go index 57c917b894..f7e8d9ae3e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use +// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use // with apply. type HPAScalingRulesApplyConfiguration struct { StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingRulesApplyConfiguration struct { Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` } -// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with +// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with // apply. func HPAScalingRules() *HPAScalingRulesApplyConfiguration { return &HPAScalingRulesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go index 70cbd4e815..e8b2abb0e6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use +// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use // with apply. type MetricIdentifierApplyConfiguration struct { Name *string `json:"name,omitempty"` Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with +// MetricIdentifierApplyConfiguration constructs a declarative configuration of the MetricIdentifier type for use with // apply. func MetricIdentifier() *MetricIdentifierApplyConfiguration { return &MetricIdentifierApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go index 1e7ee1419d..3ec7108618 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. 
type MetricSpecApplyConfiguration struct { Type *v2beta2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go index 353ec6d943..40d32795bc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { Type *v2beta2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go index fbf006a5a6..aeec3102ee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use +// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use // with apply. type MetricTargetApplyConfiguration struct { Type *v2beta2.MetricTargetType `json:"type,omitempty"` @@ -32,7 +32,7 @@ type MetricTargetApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with +// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with // apply. 
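// --- Editorial sketch (not part of the vendored diff) ---------------------------------
// The autoscaling/v2beta2 apply configurations touched above are plain builders: every
// field has a generated With* setter that returns the receiver, so metric specs are
// composed by chaining. A hedged example of a CPU-utilization metric; the 80% target is
// illustrative and the With* calls are assumed from the generated setters.

package example

import (
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	corev1 "k8s.io/api/core/v1"
	acautoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
)

func cpuUtilizationMetric() *acautoscalingv2beta2.MetricSpecApplyConfiguration {
	// Resource metric source targeting 80% average CPU utilization.
	return acautoscalingv2beta2.MetricSpec().
		WithType(autoscalingv2beta2.ResourceMetricSourceType).
		WithResource(acautoscalingv2beta2.ResourceMetricSource().
			WithName(corev1.ResourceCPU).
			WithTarget(acautoscalingv2beta2.MetricTarget().
				WithType(autoscalingv2beta2.UtilizationMetricType).
				WithAverageUtilization(80)))
}
// ---------------------------------------------------------------------------------------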
func MetricTarget() *MetricTargetApplyConfiguration { return &MetricTargetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go index 5796a0b4c1..cc409fc283 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use +// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use // with apply. type MetricValueStatusApplyConfiguration struct { Value *resource.Quantity `json:"value,omitempty"` @@ -30,7 +30,7 @@ type MetricValueStatusApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with +// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with // apply. func MetricValueStatus() *MetricValueStatusApplyConfiguration { return &MetricValueStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go index eed31dab61..17b492fa06 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. type ObjectMetricSourceApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go index 175e2120d6..e87417f2e7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. 
type ObjectMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricStatusApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go index 0365880950..6ecbb18071 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use // with apply. type PodsMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with // apply. func PodsMetricSource() *PodsMetricSourceApplyConfiguration { return &PodsMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go index e6f98be8c4..cd10297261 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use // with apply. type PodsMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with // apply. 
func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { return &PodsMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go index cc8118d5e3..c482d75f4b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use // with apply. type ResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with // apply. func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { return &ResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go index 0ab56be0f7..eb13e90b7d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use // with apply. type ResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with // apply. func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { return &ResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go index 5225a5a079..8b26816e58 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CronJobApplyConfiguration represents an declarative configuration of the CronJob type for use +// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use // with apply. 
type CronJobApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CronJobApplyConfiguration struct { Status *CronJobStatusApplyConfiguration `json:"status,omitempty"` } -// CronJob constructs an declarative configuration of the CronJob type for use with +// CronJob constructs a declarative configuration of the CronJob type for use with // apply. func CronJob(name, namespace string) *CronJobApplyConfiguration { b := &CronJobApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CronJobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go index 22a34dcb61..62f9b5298b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/batch/v1" ) -// CronJobSpecApplyConfiguration represents an declarative configuration of the CronJobSpec type for use +// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use // with apply. type CronJobSpecApplyConfiguration struct { Schedule *string `json:"schedule,omitempty"` @@ -35,7 +35,7 @@ type CronJobSpecApplyConfiguration struct { FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"` } -// CronJobSpecApplyConfiguration constructs an declarative configuration of the CronJobSpec type for use with +// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with // apply. func CronJobSpec() *CronJobSpecApplyConfiguration { return &CronJobSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go index b7cc2bdfb5..095dfe017f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// CronJobStatusApplyConfiguration represents an declarative configuration of the CronJobStatus type for use +// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use // with apply. type CronJobStatusApplyConfiguration struct { Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"` @@ -31,7 +31,7 @@ type CronJobStatusApplyConfiguration struct { LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` } -// CronJobStatusApplyConfiguration constructs an declarative configuration of the CronJobStatus type for use with +// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with // apply. 
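// --- Editorial sketch (not part of the vendored diff) ---------------------------------
// Besides the comment fix, the batch/v1 CronJob apply configuration gains a GetName
// accessor in this update. A hedged illustration of the usual server-side-apply flow
// with the typed clientset; the schedule and the "example-manager" field manager are
// illustrative, and a real CronJob spec would also need a jobTemplate, omitted here
// for brevity.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	acbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
	"k8s.io/client-go/kubernetes"
)

func applyCronJob(ctx context.Context, cs kubernetes.Interface) (*string, error) {
	cj := acbatchv1.CronJob("cleanup", "default").
		WithSpec(acbatchv1.CronJobSpec().
			WithSchedule("*/30 * * * *").
			WithSuspend(false))

	if _, err := cs.BatchV1().CronJobs("default").
		Apply(ctx, cj, metav1.ApplyOptions{FieldManager: "example-manager", Force: true}); err != nil {
		return nil, err
	}
	// The new accessor returns the Name pointer held by the embedded ObjectMeta configuration.
	return cj.GetName(), nil
}
// ---------------------------------------------------------------------------------------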
func CronJobStatus() *CronJobStatusApplyConfiguration { return &CronJobStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go index fb10ba3968..1333e91844 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobApplyConfiguration represents an declarative configuration of the Job type for use +// JobApplyConfiguration represents a declarative configuration of the Job type for use // with apply. type JobApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type JobApplyConfiguration struct { Status *JobStatusApplyConfiguration `json:"status,omitempty"` } -// Job constructs an declarative configuration of the Job type for use with +// Job constructs a declarative configuration of the Job type for use with // apply. func Job(name, namespace string) *JobApplyConfiguration { b := &JobApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *JobApplyConfiguration) WithStatus(value *JobStatusApplyConfiguration) * b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *JobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go index 388ca7a1c0..4f15bc6045 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// JobConditionApplyConfiguration represents an declarative configuration of the JobCondition type for use +// JobConditionApplyConfiguration represents a declarative configuration of the JobCondition type for use // with apply. type JobConditionApplyConfiguration struct { Type *v1.JobConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type JobConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// JobConditionApplyConfiguration constructs an declarative configuration of the JobCondition type for use with +// JobConditionApplyConfiguration constructs a declarative configuration of the JobCondition type for use with // apply. func JobCondition() *JobConditionApplyConfiguration { return &JobConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go index bbcff71c86..2104fe113d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobSpecApplyConfiguration represents an declarative configuration of the JobSpec type for use +// JobSpecApplyConfiguration represents a declarative configuration of the JobSpec type for use // with apply. 
type JobSpecApplyConfiguration struct { Parallelism *int32 `json:"parallelism,omitempty"` @@ -45,7 +45,7 @@ type JobSpecApplyConfiguration struct { ManagedBy *string `json:"managedBy,omitempty"` } -// JobSpecApplyConfiguration constructs an declarative configuration of the JobSpec type for use with +// JobSpecApplyConfiguration constructs a declarative configuration of the JobSpec type for use with // apply. func JobSpec() *JobSpecApplyConfiguration { return &JobSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go index e8e472f8f7..071a0153f5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// JobStatusApplyConfiguration represents an declarative configuration of the JobStatus type for use +// JobStatusApplyConfiguration represents a declarative configuration of the JobStatus type for use // with apply. type JobStatusApplyConfiguration struct { Conditions []JobConditionApplyConfiguration `json:"conditions,omitempty"` @@ -38,7 +38,7 @@ type JobStatusApplyConfiguration struct { Ready *int32 `json:"ready,omitempty"` } -// JobStatusApplyConfiguration constructs an declarative configuration of the JobStatus type for use with +// JobStatusApplyConfiguration constructs a declarative configuration of the JobStatus type for use with // apply. func JobStatus() *JobStatusApplyConfiguration { return &JobStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go index b37a815680..901c4228e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go @@ -24,14 +24,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobTemplateSpecApplyConfiguration represents an declarative configuration of the JobTemplateSpec type for use +// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use // with apply. type JobTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *JobSpecApplyConfiguration `json:"spec,omitempty"` } -// JobTemplateSpecApplyConfiguration constructs an declarative configuration of the JobTemplateSpec type for use with +// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with // apply. func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { return &JobTemplateSpecApplyConfiguration{} @@ -186,3 +186,9 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *JobSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *JobTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go index 6da98386c9..05a68b3c94 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PodFailurePolicyApplyConfiguration represents an declarative configuration of the PodFailurePolicy type for use +// PodFailurePolicyApplyConfiguration represents a declarative configuration of the PodFailurePolicy type for use // with apply. type PodFailurePolicyApplyConfiguration struct { Rules []PodFailurePolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// PodFailurePolicyApplyConfiguration constructs an declarative configuration of the PodFailurePolicy type for use with +// PodFailurePolicyApplyConfiguration constructs a declarative configuration of the PodFailurePolicy type for use with // apply. func PodFailurePolicy() *PodFailurePolicyApplyConfiguration { return &PodFailurePolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go index 65f6251810..cd32296ca0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/batch/v1" ) -// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use // with apply. type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct { ContainerName *string `json:"containerName,omitempty"` @@ -30,7 +30,7 @@ type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct { Values []int32 `json:"values,omitempty"` } -// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with // apply. 
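// --- Editorial sketch (not part of the vendored diff) ---------------------------------
// The pod-failure-policy apply configurations above compose the same way as the rest of
// the generated builders. A hedged example of a JobSpec that fails the Job on container
// exit code 42 and ignores disruption-target failures; all values are illustrative.

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	acbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
)

func jobSpecWithFailurePolicy() *acbatchv1.JobSpecApplyConfiguration {
	return acbatchv1.JobSpec().
		WithBackoffLimit(6).
		WithPodFailurePolicy(acbatchv1.PodFailurePolicy().
			WithRules(
				// Fail the whole Job when any container exits with code 42.
				acbatchv1.PodFailurePolicyRule().
					WithAction(batchv1.PodFailurePolicyActionFailJob).
					WithOnExitCodes(acbatchv1.PodFailurePolicyOnExitCodesRequirement().
						WithOperator(batchv1.PodFailurePolicyOnExitCodesOpIn).
						WithValues(42)),
				// Do not count pods terminated by disruptions against the backoff limit.
				acbatchv1.PodFailurePolicyRule().
					WithAction(batchv1.PodFailurePolicyActionIgnore).
					WithOnPodConditions(acbatchv1.PodFailurePolicyOnPodConditionsPattern().
						WithType(corev1.DisruptionTarget).
						WithStatus(corev1.ConditionTrue)),
			))
}
// ---------------------------------------------------------------------------------------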
func PodFailurePolicyOnExitCodesRequirement() *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { return &PodFailurePolicyOnExitCodesRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go index da1556ff8b..07af4fb0e7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use // with apply. type PodFailurePolicyOnPodConditionsPatternApplyConfiguration struct { Type *v1.PodConditionType `json:"type,omitempty"` Status *v1.ConditionStatus `json:"status,omitempty"` } -// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with // apply. func PodFailurePolicyOnPodConditionsPattern() *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { return &PodFailurePolicyOnPodConditionsPatternApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go index d435243531..b004921d38 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/batch/v1" ) -// PodFailurePolicyRuleApplyConfiguration represents an declarative configuration of the PodFailurePolicyRule type for use +// PodFailurePolicyRuleApplyConfiguration represents a declarative configuration of the PodFailurePolicyRule type for use // with apply. type PodFailurePolicyRuleApplyConfiguration struct { Action *v1.PodFailurePolicyAction `json:"action,omitempty"` @@ -30,7 +30,7 @@ type PodFailurePolicyRuleApplyConfiguration struct { OnPodConditions []PodFailurePolicyOnPodConditionsPatternApplyConfiguration `json:"onPodConditions,omitempty"` } -// PodFailurePolicyRuleApplyConfiguration constructs an declarative configuration of the PodFailurePolicyRule type for use with +// PodFailurePolicyRuleApplyConfiguration constructs a declarative configuration of the PodFailurePolicyRule type for use with // apply. func PodFailurePolicyRule() *PodFailurePolicyRuleApplyConfiguration { return &PodFailurePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go index 327aa1f5a4..a3f4f39e2e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1 -// SuccessPolicyApplyConfiguration represents an declarative configuration of the SuccessPolicy type for use +// SuccessPolicyApplyConfiguration represents a declarative configuration of the SuccessPolicy type for use // with apply. type SuccessPolicyApplyConfiguration struct { Rules []SuccessPolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// SuccessPolicyApplyConfiguration constructs an declarative configuration of the SuccessPolicy type for use with +// SuccessPolicyApplyConfiguration constructs a declarative configuration of the SuccessPolicy type for use with // apply. func SuccessPolicy() *SuccessPolicyApplyConfiguration { return &SuccessPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go index 4c862e6821..2b5e3d91fe 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SuccessPolicyRuleApplyConfiguration represents an declarative configuration of the SuccessPolicyRule type for use +// SuccessPolicyRuleApplyConfiguration represents a declarative configuration of the SuccessPolicyRule type for use // with apply. type SuccessPolicyRuleApplyConfiguration struct { SucceededIndexes *string `json:"succeededIndexes,omitempty"` SucceededCount *int32 `json:"succeededCount,omitempty"` } -// SuccessPolicyRuleApplyConfiguration constructs an declarative configuration of the SuccessPolicyRule type for use with +// SuccessPolicyRuleApplyConfiguration constructs a declarative configuration of the SuccessPolicyRule type for use with // apply. func SuccessPolicyRule() *SuccessPolicyRuleApplyConfiguration { return &SuccessPolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go index 1409303fff..ff6b57b86c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go @@ -22,14 +22,14 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// UncountedTerminatedPodsApplyConfiguration represents an declarative configuration of the UncountedTerminatedPods type for use +// UncountedTerminatedPodsApplyConfiguration represents a declarative configuration of the UncountedTerminatedPods type for use // with apply. type UncountedTerminatedPodsApplyConfiguration struct { Succeeded []types.UID `json:"succeeded,omitempty"` Failed []types.UID `json:"failed,omitempty"` } -// UncountedTerminatedPodsApplyConfiguration constructs an declarative configuration of the UncountedTerminatedPods type for use with +// UncountedTerminatedPodsApplyConfiguration constructs a declarative configuration of the UncountedTerminatedPods type for use with // apply. 
func UncountedTerminatedPods() *UncountedTerminatedPodsApplyConfiguration { return &UncountedTerminatedPodsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go index 1d735a8407..765ed5e651 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CronJobApplyConfiguration represents an declarative configuration of the CronJob type for use +// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use // with apply. type CronJobApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CronJobApplyConfiguration struct { Status *CronJobStatusApplyConfiguration `json:"status,omitempty"` } -// CronJob constructs an declarative configuration of the CronJob type for use with +// CronJob constructs a declarative configuration of the CronJob type for use with // apply. func CronJob(name, namespace string) *CronJobApplyConfiguration { b := &CronJobApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CronJobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go index 68c0777de0..21043690da 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/batch/v1beta1" ) -// CronJobSpecApplyConfiguration represents an declarative configuration of the CronJobSpec type for use +// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use // with apply. type CronJobSpecApplyConfiguration struct { Schedule *string `json:"schedule,omitempty"` @@ -35,7 +35,7 @@ type CronJobSpecApplyConfiguration struct { FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"` } -// CronJobSpecApplyConfiguration constructs an declarative configuration of the CronJobSpec type for use with +// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with // apply. func CronJobSpec() *CronJobSpecApplyConfiguration { return &CronJobSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go index 8dca14f663..335f9e0dce 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// CronJobStatusApplyConfiguration represents an declarative configuration of the CronJobStatus type for use +// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use // with apply. 
type CronJobStatusApplyConfiguration struct { Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"` @@ -31,7 +31,7 @@ type CronJobStatusApplyConfiguration struct { LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` } -// CronJobStatusApplyConfiguration constructs an declarative configuration of the CronJobStatus type for use with +// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with // apply. func CronJobStatus() *CronJobStatusApplyConfiguration { return &CronJobStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go index f925d65a7e..5fd2485c69 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go @@ -25,14 +25,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobTemplateSpecApplyConfiguration represents an declarative configuration of the JobTemplateSpec type for use +// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use // with apply. type JobTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *batchv1.JobSpecApplyConfiguration `json:"spec,omitempty"` } -// JobTemplateSpecApplyConfiguration constructs an declarative configuration of the JobTemplateSpec type for use with +// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with // apply. func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { return &JobTemplateSpecApplyConfiguration{} @@ -187,3 +187,9 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *batchv1.JobSpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *JobTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go index 3d02c0be80..e30bb62427 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CertificateSigningRequestApplyConfiguration represents an declarative configuration of the CertificateSigningRequest type for use +// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use // with apply. type CertificateSigningRequestApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CertificateSigningRequestApplyConfiguration struct { Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"` } -// CertificateSigningRequest constructs an declarative configuration of the CertificateSigningRequest type for use with +// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with // apply. 
func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfiguration { b := &CertificateSigningRequestApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CertificateSigningRequestApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go index 13d69cfcef..7a4bfce011 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CertificateSigningRequestConditionApplyConfiguration represents an declarative configuration of the CertificateSigningRequestCondition type for use +// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use // with apply. type CertificateSigningRequestConditionApplyConfiguration struct { Type *v1.RequestConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestConditionApplyConfiguration struct { LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` } -// CertificateSigningRequestConditionApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestCondition type for use with +// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with // apply. func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApplyConfiguration { return &CertificateSigningRequestConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go index 81ca214a9d..9c4a85693a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/certificates/v1" ) -// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use +// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use // with apply. type CertificateSigningRequestSpecApplyConfiguration struct { Request []byte `json:"request,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestSpecApplyConfiguration struct { Extra map[string]v1.ExtraValue `json:"extra,omitempty"` } -// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with +// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with // apply. 
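// --- Editorial sketch (not part of the vendored diff) ---------------------------------
// A hedged example of building a certificates/v1 CertificateSigningRequest apply
// configuration and reading it back through the new GetName accessor. csrPEM and the
// field values are placeholders, not anything prescribed by this change.

package example

import (
	certificatesv1 "k8s.io/api/certificates/v1"
	accertificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1"
)

func csrApplyConfig(csrPEM []byte) *accertificatesv1.CertificateSigningRequestApplyConfiguration {
	csr := accertificatesv1.CertificateSigningRequest("example-client-cert").
		WithSpec(accertificatesv1.CertificateSigningRequestSpec().
			WithRequest(csrPEM...). // []byte fields get a variadic byte setter
			WithSignerName("kubernetes.io/kube-apiserver-client").
			WithUsages(certificatesv1.UsageClientAuth).
			WithExpirationSeconds(3600))
	_ = csr.GetName() // accessor added by this update
	return csr
}
// ---------------------------------------------------------------------------------------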
func CertificateSigningRequestSpec() *CertificateSigningRequestSpecApplyConfiguration { return &CertificateSigningRequestSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go index 59d5930331..897f6d1e98 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// CertificateSigningRequestStatusApplyConfiguration represents an declarative configuration of the CertificateSigningRequestStatus type for use +// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use // with apply. type CertificateSigningRequestStatusApplyConfiguration struct { Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"` Certificate []byte `json:"certificate,omitempty"` } -// CertificateSigningRequestStatusApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestStatus type for use with +// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with // apply. func CertificateSigningRequestStatus() *CertificateSigningRequestStatusApplyConfiguration { return &CertificateSigningRequestStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go index 788d2a07dc..9cd10bc56a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterTrustBundleApplyConfiguration represents an declarative configuration of the ClusterTrustBundle type for use +// ClusterTrustBundleApplyConfiguration represents a declarative configuration of the ClusterTrustBundle type for use // with apply. type ClusterTrustBundleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ClusterTrustBundleApplyConfiguration struct { Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"` } -// ClusterTrustBundle constructs an declarative configuration of the ClusterTrustBundle type for use with +// ClusterTrustBundle constructs a declarative configuration of the ClusterTrustBundle type for use with // apply. func ClusterTrustBundle(name string) *ClusterTrustBundleApplyConfiguration { b := &ClusterTrustBundleApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *ClusterTrustBundleApplyConfiguration) WithSpec(value *ClusterTrustBundl b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ClusterTrustBundleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go index d1aea1d6dc..7bb36f7084 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// ClusterTrustBundleSpecApplyConfiguration represents an declarative configuration of the ClusterTrustBundleSpec type for use +// ClusterTrustBundleSpecApplyConfiguration represents a declarative configuration of the ClusterTrustBundleSpec type for use // with apply. type ClusterTrustBundleSpecApplyConfiguration struct { SignerName *string `json:"signerName,omitempty"` TrustBundle *string `json:"trustBundle,omitempty"` } -// ClusterTrustBundleSpecApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleSpec type for use with +// ClusterTrustBundleSpecApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleSpec type for use with // apply. func ClusterTrustBundleSpec() *ClusterTrustBundleSpecApplyConfiguration { return &ClusterTrustBundleSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go index 83a0edc18f..d6e08824a4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CertificateSigningRequestApplyConfiguration represents an declarative configuration of the CertificateSigningRequest type for use +// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use // with apply. type CertificateSigningRequestApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CertificateSigningRequestApplyConfiguration struct { Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"` } -// CertificateSigningRequest constructs an declarative configuration of the CertificateSigningRequest type for use with +// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with // apply. func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfiguration { b := &CertificateSigningRequestApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *CertificateSigningRequestApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go index 2c32a3272c..6e3692d1c2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CertificateSigningRequestConditionApplyConfiguration represents an declarative configuration of the CertificateSigningRequestCondition type for use +// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use // with apply. type CertificateSigningRequestConditionApplyConfiguration struct { Type *v1beta1.RequestConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestConditionApplyConfiguration struct { LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` } -// CertificateSigningRequestConditionApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestCondition type for use with +// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with // apply. func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApplyConfiguration { return &CertificateSigningRequestConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go index 9554b1f400..9284eca3a4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/certificates/v1beta1" ) -// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use +// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use // with apply. type CertificateSigningRequestSpecApplyConfiguration struct { Request []byte `json:"request,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestSpecApplyConfiguration struct { Extra map[string]v1beta1.ExtraValue `json:"extra,omitempty"` } -// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with +// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with // apply. 
func CertificateSigningRequestSpec() *CertificateSigningRequestSpecApplyConfiguration { return &CertificateSigningRequestSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go index 9d8c5d4585..f82e8aed3b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// CertificateSigningRequestStatusApplyConfiguration represents an declarative configuration of the CertificateSigningRequestStatus type for use +// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use // with apply. type CertificateSigningRequestStatusApplyConfiguration struct { Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"` Certificate []byte `json:"certificate,omitempty"` } -// CertificateSigningRequestStatusApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestStatus type for use with +// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with // apply. func CertificateSigningRequestStatus() *CertificateSigningRequestStatusApplyConfiguration { return &CertificateSigningRequestStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go index 618f12fb21..ffd84583f4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LeaseApplyConfiguration represents an declarative configuration of the Lease type for use +// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use // with apply. type LeaseApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type LeaseApplyConfiguration struct { Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"` } -// Lease constructs an declarative configuration of the Lease type for use with +// Lease constructs a declarative configuration of the Lease type for use with // apply. func Lease(name, namespace string) *LeaseApplyConfiguration { b := &LeaseApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) * b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LeaseApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go index a5f6a6ebba..01d0df1380 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go @@ -19,20 +19,23 @@ limitations under the License. 
package v1 import ( + coordinationv1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LeaseSpecApplyConfiguration represents an declarative configuration of the LeaseSpec type for use +// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use // with apply. type LeaseSpecApplyConfiguration struct { - HolderIdentity *string `json:"holderIdentity,omitempty"` - LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` - AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` - RenewTime *v1.MicroTime `json:"renewTime,omitempty"` - LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + HolderIdentity *string `json:"holderIdentity,omitempty"` + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` + AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"` + PreferredHolder *string `json:"preferredHolder,omitempty"` } -// LeaseSpecApplyConfiguration constructs an declarative configuration of the LeaseSpec type for use with +// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with // apply. func LeaseSpec() *LeaseSpecApplyConfiguration { return &LeaseSpecApplyConfiguration{} @@ -77,3 +80,19 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseTransitions(value int32) *LeaseSp b.LeaseTransitions = &value return b } + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseSpecApplyConfiguration { + b.Strategy = &value + return b +} + +// WithPreferredHolder sets the PreferredHolder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreferredHolder field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithPreferredHolder(value string) *LeaseSpecApplyConfiguration { + b.PreferredHolder = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go new file mode 100644 index 0000000000..ef76847791 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go @@ -0,0 +1,255 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
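// --- Editorial sketch (not part of the vendored diff) ---------------------------------
// The coordination/v1 LeaseSpec apply configuration gains Strategy and PreferredHolder
// fields with matching WithStrategy/WithPreferredHolder setters, shown above. A hedged
// example; the strategy is written as a string conversion rather than assuming a
// particular exported constant, and the holder names are illustrative.

package example

import (
	coordinationv1 "k8s.io/api/coordination/v1"
	accoordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1"
)

func leaseWithCoordinatedStrategy() *accoordinationv1.LeaseApplyConfiguration {
	return accoordinationv1.Lease("example-leader", "kube-system").
		WithSpec(accoordinationv1.LeaseSpec().
			WithHolderIdentity("node-a").
			WithLeaseDurationSeconds(15).
			WithStrategy(coordinationv1.CoordinatedLeaseStrategy("OldestEmulationVersion")).
			WithPreferredHolder("node-b"))
}
// ---------------------------------------------------------------------------------------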
+ +package v1alpha1 + +import ( + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// LeaseCandidateApplyConfiguration represents a declarative configuration of the LeaseCandidate type for use +// with apply. +type LeaseCandidateApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *LeaseCandidateSpecApplyConfiguration `json:"spec,omitempty"` +} + +// LeaseCandidate constructs a declarative configuration of the LeaseCandidate type for use with +// apply. +func LeaseCandidate(name, namespace string) *LeaseCandidateApplyConfiguration { + b := &LeaseCandidateApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("LeaseCandidate") + b.WithAPIVersion("coordination.k8s.io/v1alpha1") + return b +} + +// ExtractLeaseCandidate extracts the applied configuration owned by fieldManager from +// leaseCandidate. If no managedFields are found in leaseCandidate for fieldManager, a +// LeaseCandidateApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// leaseCandidate must be a unmodified LeaseCandidate API object that was retrieved from the Kubernetes API. +// ExtractLeaseCandidate provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { + return extractLeaseCandidate(leaseCandidate, fieldManager, "") +} + +// ExtractLeaseCandidateStatus is the same as ExtractLeaseCandidate except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractLeaseCandidateStatus(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { + return extractLeaseCandidate(leaseCandidate, fieldManager, "status") +} + +func extractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) { + b := &LeaseCandidateApplyConfiguration{} + err := managedfields.ExtractInto(leaseCandidate, internal.Parser().Type("io.k8s.api.coordination.v1alpha1.LeaseCandidate"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(leaseCandidate.Name) + b.WithNamespace(leaseCandidate.Namespace) + + b.WithKind("LeaseCandidate") + b.WithAPIVersion("coordination.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *LeaseCandidateApplyConfiguration) WithKind(value string) *LeaseCandidateApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithAPIVersion(value string) *LeaseCandidateApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithName(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithGenerateName(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithNamespace(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithUID(value types.UID) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithResourceVersion(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *LeaseCandidateApplyConfiguration) WithGeneration(value int64) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *LeaseCandidateApplyConfiguration) WithLabels(entries map[string]string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *LeaseCandidateApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *LeaseCandidateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *LeaseCandidateApplyConfiguration) WithFinalizers(values ...string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *LeaseCandidateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithSpec(value *LeaseCandidateSpecApplyConfiguration) *LeaseCandidateApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LeaseCandidateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go new file mode 100644 index 0000000000..61d3dca10b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go @@ -0,0 +1,91 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + coordinationv1 "k8s.io/api/coordination/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LeaseCandidateSpecApplyConfiguration represents a declarative configuration of the LeaseCandidateSpec type for use +// with apply. 
+type LeaseCandidateSpecApplyConfiguration struct { + LeaseName *string `json:"leaseName,omitempty"` + PingTime *v1.MicroTime `json:"pingTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + BinaryVersion *string `json:"binaryVersion,omitempty"` + EmulationVersion *string `json:"emulationVersion,omitempty"` + PreferredStrategies []coordinationv1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty"` +} + +// LeaseCandidateSpecApplyConfiguration constructs a declarative configuration of the LeaseCandidateSpec type for use with +// apply. +func LeaseCandidateSpec() *LeaseCandidateSpecApplyConfiguration { + return &LeaseCandidateSpecApplyConfiguration{} +} + +// WithLeaseName sets the LeaseName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LeaseName field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithLeaseName(value string) *LeaseCandidateSpecApplyConfiguration { + b.LeaseName = &value + return b +} + +// WithPingTime sets the PingTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PingTime field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithPingTime(value v1.MicroTime) *LeaseCandidateSpecApplyConfiguration { + b.PingTime = &value + return b +} + +// WithRenewTime sets the RenewTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RenewTime field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithRenewTime(value v1.MicroTime) *LeaseCandidateSpecApplyConfiguration { + b.RenewTime = &value + return b +} + +// WithBinaryVersion sets the BinaryVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BinaryVersion field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithBinaryVersion(value string) *LeaseCandidateSpecApplyConfiguration { + b.BinaryVersion = &value + return b +} + +// WithEmulationVersion sets the EmulationVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EmulationVersion field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithEmulationVersion(value string) *LeaseCandidateSpecApplyConfiguration { + b.EmulationVersion = &value + return b +} + +// WithPreferredStrategies adds the given value to the PreferredStrategies field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PreferredStrategies field. 
+func (b *LeaseCandidateSpecApplyConfiguration) WithPreferredStrategies(values ...coordinationv1.CoordinatedLeaseStrategy) *LeaseCandidateSpecApplyConfiguration { + for i := range values { + b.PreferredStrategies = append(b.PreferredStrategies, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go index 867e0f58ba..9aa0703e8e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LeaseApplyConfiguration represents an declarative configuration of the Lease type for use +// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use // with apply. type LeaseApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type LeaseApplyConfiguration struct { Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"` } -// Lease constructs an declarative configuration of the Lease type for use with +// Lease constructs a declarative configuration of the Lease type for use with // apply. func Lease(name, namespace string) *LeaseApplyConfiguration { b := &LeaseApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) * b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LeaseApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go index 865eb76455..8c7fddfc61 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go @@ -19,20 +19,23 @@ limitations under the License. package v1beta1 import ( + coordinationv1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LeaseSpecApplyConfiguration represents an declarative configuration of the LeaseSpec type for use +// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use // with apply. type LeaseSpecApplyConfiguration struct { - HolderIdentity *string `json:"holderIdentity,omitempty"` - LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` - AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` - RenewTime *v1.MicroTime `json:"renewTime,omitempty"` - LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + HolderIdentity *string `json:"holderIdentity,omitempty"` + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` + AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"` + PreferredHolder *string `json:"preferredHolder,omitempty"` } -// LeaseSpecApplyConfiguration constructs an declarative configuration of the LeaseSpec type for use with +// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with // apply. 
func LeaseSpec() *LeaseSpecApplyConfiguration { return &LeaseSpecApplyConfiguration{} @@ -77,3 +80,19 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseTransitions(value int32) *LeaseSp b.LeaseTransitions = &value return b } + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseSpecApplyConfiguration { + b.Strategy = &value + return b +} + +// WithPreferredHolder sets the PreferredHolder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreferredHolder field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithPreferredHolder(value string) *LeaseSpecApplyConfiguration { + b.PreferredHolder = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go index df6d1c64e5..45484f140d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AffinityApplyConfiguration represents an declarative configuration of the Affinity type for use +// AffinityApplyConfiguration represents a declarative configuration of the Affinity type for use // with apply. type AffinityApplyConfiguration struct { NodeAffinity *NodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"` @@ -26,7 +26,7 @@ type AffinityApplyConfiguration struct { PodAntiAffinity *PodAntiAffinityApplyConfiguration `json:"podAntiAffinity,omitempty"` } -// AffinityApplyConfiguration constructs an declarative configuration of the Affinity type for use with +// AffinityApplyConfiguration constructs a declarative configuration of the Affinity type for use with // apply. func Affinity() *AffinityApplyConfiguration { return &AffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go index 7f3c22afa1..1d698fd610 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// AppArmorProfileApplyConfiguration represents an declarative configuration of the AppArmorProfile type for use +// AppArmorProfileApplyConfiguration represents a declarative configuration of the AppArmorProfile type for use // with apply. type AppArmorProfileApplyConfiguration struct { Type *v1.AppArmorProfileType `json:"type,omitempty"` LocalhostProfile *string `json:"localhostProfile,omitempty"` } -// AppArmorProfileApplyConfiguration constructs an declarative configuration of the AppArmorProfile type for use with +// AppArmorProfileApplyConfiguration constructs a declarative configuration of the AppArmorProfile type for use with // apply. 
func AppArmorProfile() *AppArmorProfileApplyConfiguration { return &AppArmorProfileApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go index 970bf24c45..e4c2fff3f6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// AttachedVolumeApplyConfiguration represents an declarative configuration of the AttachedVolume type for use +// AttachedVolumeApplyConfiguration represents a declarative configuration of the AttachedVolume type for use // with apply. type AttachedVolumeApplyConfiguration struct { Name *v1.UniqueVolumeName `json:"name,omitempty"` DevicePath *string `json:"devicePath,omitempty"` } -// AttachedVolumeApplyConfiguration constructs an declarative configuration of the AttachedVolume type for use with +// AttachedVolumeApplyConfiguration constructs a declarative configuration of the AttachedVolume type for use with // apply. func AttachedVolume() *AttachedVolumeApplyConfiguration { return &AttachedVolumeApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go index 6ff335e9d6..d08786965e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AWSElasticBlockStoreVolumeSourceApplyConfiguration represents an declarative configuration of the AWSElasticBlockStoreVolumeSource type for use +// AWSElasticBlockStoreVolumeSourceApplyConfiguration represents a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use // with apply. type AWSElasticBlockStoreVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type AWSElasticBlockStoreVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// AWSElasticBlockStoreVolumeSourceApplyConfiguration constructs an declarative configuration of the AWSElasticBlockStoreVolumeSource type for use with +// AWSElasticBlockStoreVolumeSourceApplyConfiguration constructs a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use with // apply. func AWSElasticBlockStoreVolumeSource() *AWSElasticBlockStoreVolumeSourceApplyConfiguration { return &AWSElasticBlockStoreVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go index b2774735ae..40ad5ac78f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// AzureDiskVolumeSourceApplyConfiguration represents an declarative configuration of the AzureDiskVolumeSource type for use +// AzureDiskVolumeSourceApplyConfiguration represents a declarative configuration of the AzureDiskVolumeSource type for use // with apply. 
type AzureDiskVolumeSourceApplyConfiguration struct { DiskName *string `json:"diskName,omitempty"` @@ -33,7 +33,7 @@ type AzureDiskVolumeSourceApplyConfiguration struct { Kind *v1.AzureDataDiskKind `json:"kind,omitempty"` } -// AzureDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureDiskVolumeSource type for use with +// AzureDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureDiskVolumeSource type for use with // apply. func AzureDiskVolumeSource() *AzureDiskVolumeSourceApplyConfiguration { return &AzureDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go index f173938334..70a6b17be8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AzureFilePersistentVolumeSourceApplyConfiguration represents an declarative configuration of the AzureFilePersistentVolumeSource type for use +// AzureFilePersistentVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFilePersistentVolumeSource type for use // with apply. type AzureFilePersistentVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -27,7 +27,7 @@ type AzureFilePersistentVolumeSourceApplyConfiguration struct { SecretNamespace *string `json:"secretNamespace,omitempty"` } -// AzureFilePersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureFilePersistentVolumeSource type for use with +// AzureFilePersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureFilePersistentVolumeSource type for use with // apply. func AzureFilePersistentVolumeSource() *AzureFilePersistentVolumeSourceApplyConfiguration { return &AzureFilePersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go index a7f7f33d88..ff0c867919 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AzureFileVolumeSourceApplyConfiguration represents an declarative configuration of the AzureFileVolumeSource type for use +// AzureFileVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFileVolumeSource type for use // with apply. type AzureFileVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -26,7 +26,7 @@ type AzureFileVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// AzureFileVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureFileVolumeSource type for use with +// AzureFileVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureFileVolumeSource type for use with // apply. 
func AzureFileVolumeSource() *AzureFileVolumeSourceApplyConfiguration { return &AzureFileVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go index c3d176c4d8..1c463aef50 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// CapabilitiesApplyConfiguration represents an declarative configuration of the Capabilities type for use +// CapabilitiesApplyConfiguration represents a declarative configuration of the Capabilities type for use // with apply. type CapabilitiesApplyConfiguration struct { Add []v1.Capability `json:"add,omitempty"` Drop []v1.Capability `json:"drop,omitempty"` } -// CapabilitiesApplyConfiguration constructs an declarative configuration of the Capabilities type for use with +// CapabilitiesApplyConfiguration constructs a declarative configuration of the Capabilities type for use with // apply. func Capabilities() *CapabilitiesApplyConfiguration { return &CapabilitiesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go index a41936fe3d..f3ee2d03e9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CephFSPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CephFSPersistentVolumeSource type for use +// CephFSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSPersistentVolumeSource type for use // with apply. type CephFSPersistentVolumeSourceApplyConfiguration struct { Monitors []string `json:"monitors,omitempty"` @@ -29,7 +29,7 @@ type CephFSPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// CephFSPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CephFSPersistentVolumeSource type for use with +// CephFSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSPersistentVolumeSource type for use with // apply. func CephFSPersistentVolumeSource() *CephFSPersistentVolumeSourceApplyConfiguration { return &CephFSPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go index 0ea070ba5d..77d53d6eb0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CephFSVolumeSourceApplyConfiguration represents an declarative configuration of the CephFSVolumeSource type for use +// CephFSVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSVolumeSource type for use // with apply. 
type CephFSVolumeSourceApplyConfiguration struct { Monitors []string `json:"monitors,omitempty"` @@ -29,7 +29,7 @@ type CephFSVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// CephFSVolumeSourceApplyConfiguration constructs an declarative configuration of the CephFSVolumeSource type for use with +// CephFSVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSVolumeSource type for use with // apply. func CephFSVolumeSource() *CephFSVolumeSourceApplyConfiguration { return &CephFSVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go index 7754cf92f7..b265734882 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CinderPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CinderPersistentVolumeSource type for use +// CinderPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CinderPersistentVolumeSource type for use // with apply. type CinderPersistentVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type CinderPersistentVolumeSourceApplyConfiguration struct { SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// CinderPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CinderPersistentVolumeSource type for use with +// CinderPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CinderPersistentVolumeSource type for use with // apply. func CinderPersistentVolumeSource() *CinderPersistentVolumeSourceApplyConfiguration { return &CinderPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go index 51271e279d..131cbf219c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CinderVolumeSourceApplyConfiguration represents an declarative configuration of the CinderVolumeSource type for use +// CinderVolumeSourceApplyConfiguration represents a declarative configuration of the CinderVolumeSource type for use // with apply. type CinderVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type CinderVolumeSourceApplyConfiguration struct { SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// CinderVolumeSourceApplyConfiguration constructs an declarative configuration of the CinderVolumeSource type for use with +// CinderVolumeSourceApplyConfiguration constructs a declarative configuration of the CinderVolumeSource type for use with // apply. 
func CinderVolumeSource() *CinderVolumeSourceApplyConfiguration { return &CinderVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go deleted file mode 100644 index 2153570fc0..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -// ClaimSourceApplyConfiguration represents an declarative configuration of the ClaimSource type for use -// with apply. -type ClaimSourceApplyConfiguration struct { - ResourceClaimName *string `json:"resourceClaimName,omitempty"` - ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty"` -} - -// ClaimSourceApplyConfiguration constructs an declarative configuration of the ClaimSource type for use with -// apply. -func ClaimSource() *ClaimSourceApplyConfiguration { - return &ClaimSourceApplyConfiguration{} -} - -// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceClaimName field is set to the value of the last call. -func (b *ClaimSourceApplyConfiguration) WithResourceClaimName(value string) *ClaimSourceApplyConfiguration { - b.ResourceClaimName = &value - return b -} - -// WithResourceClaimTemplateName sets the ResourceClaimTemplateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceClaimTemplateName field is set to the value of the last call. -func (b *ClaimSourceApplyConfiguration) WithResourceClaimTemplateName(value string) *ClaimSourceApplyConfiguration { - b.ResourceClaimTemplateName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go index a666e8faae..02c4e55e13 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ClientIPConfigApplyConfiguration represents an declarative configuration of the ClientIPConfig type for use +// ClientIPConfigApplyConfiguration represents a declarative configuration of the ClientIPConfig type for use // with apply. 
type ClientIPConfigApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` } -// ClientIPConfigApplyConfiguration constructs an declarative configuration of the ClientIPConfig type for use with +// ClientIPConfigApplyConfiguration constructs a declarative configuration of the ClientIPConfig type for use with // apply. func ClientIPConfig() *ClientIPConfigApplyConfiguration { return &ClientIPConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go index 5aa686782b..bcfbac63e7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterTrustBundleProjectionApplyConfiguration represents an declarative configuration of the ClusterTrustBundleProjection type for use +// ClusterTrustBundleProjectionApplyConfiguration represents a declarative configuration of the ClusterTrustBundleProjection type for use // with apply. type ClusterTrustBundleProjectionApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ClusterTrustBundleProjectionApplyConfiguration struct { Path *string `json:"path,omitempty"` } -// ClusterTrustBundleProjectionApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleProjection type for use with +// ClusterTrustBundleProjectionApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleProjection type for use with // apply. func ClusterTrustBundleProjection() *ClusterTrustBundleProjectionApplyConfiguration { return &ClusterTrustBundleProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go index 1ef65f5a0c..0044c7c0bb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ComponentConditionApplyConfiguration represents an declarative configuration of the ComponentCondition type for use +// ComponentConditionApplyConfiguration represents a declarative configuration of the ComponentCondition type for use // with apply. type ComponentConditionApplyConfiguration struct { Type *v1.ComponentConditionType `json:"type,omitempty"` @@ -31,7 +31,7 @@ type ComponentConditionApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// ComponentConditionApplyConfiguration constructs an declarative configuration of the ComponentCondition type for use with +// ComponentConditionApplyConfiguration constructs a declarative configuration of the ComponentCondition type for use with // apply. 
func ComponentCondition() *ComponentConditionApplyConfiguration { return &ComponentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go index 300e526942..195bde7219 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ComponentStatusApplyConfiguration represents an declarative configuration of the ComponentStatus type for use +// ComponentStatusApplyConfiguration represents a declarative configuration of the ComponentStatus type for use // with apply. type ComponentStatusApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ComponentStatusApplyConfiguration struct { Conditions []ComponentConditionApplyConfiguration `json:"conditions,omitempty"` } -// ComponentStatus constructs an declarative configuration of the ComponentStatus type for use with +// ComponentStatus constructs a declarative configuration of the ComponentStatus type for use with // apply. func ComponentStatus(name string) *ComponentStatusApplyConfiguration { b := &ComponentStatusApplyConfiguration{} @@ -250,3 +250,9 @@ func (b *ComponentStatusApplyConfiguration) WithConditions(values ...*ComponentC } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ComponentStatusApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go index f4cc7024d2..576b7a3d68 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ConfigMapApplyConfiguration represents an declarative configuration of the ConfigMap type for use +// ConfigMapApplyConfiguration represents a declarative configuration of the ConfigMap type for use // with apply. type ConfigMapApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ConfigMapApplyConfiguration struct { BinaryData map[string][]byte `json:"binaryData,omitempty"` } -// ConfigMap constructs an declarative configuration of the ConfigMap type for use with +// ConfigMap constructs a declarative configuration of the ConfigMap type for use with // apply. func ConfigMap(name, namespace string) *ConfigMapApplyConfiguration { b := &ConfigMapApplyConfiguration{} @@ -277,3 +277,9 @@ func (b *ConfigMapApplyConfiguration) WithBinaryData(entries map[string][]byte) } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ConfigMapApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go index 8802fff48f..b1fccd7000 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// ConfigMapEnvSourceApplyConfiguration represents an declarative configuration of the ConfigMapEnvSource type for use +// ConfigMapEnvSourceApplyConfiguration represents a declarative configuration of the ConfigMapEnvSource type for use // with apply. type ConfigMapEnvSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` Optional *bool `json:"optional,omitempty"` } -// ConfigMapEnvSourceApplyConfiguration constructs an declarative configuration of the ConfigMapEnvSource type for use with +// ConfigMapEnvSourceApplyConfiguration constructs a declarative configuration of the ConfigMapEnvSource type for use with // apply. func ConfigMapEnvSource() *ConfigMapEnvSourceApplyConfiguration { return &ConfigMapEnvSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go index 2a8c800afc..26c2a75b5a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapKeySelectorApplyConfiguration represents an declarative configuration of the ConfigMapKeySelector type for use +// ConfigMapKeySelectorApplyConfiguration represents a declarative configuration of the ConfigMapKeySelector type for use // with apply. type ConfigMapKeySelectorApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type ConfigMapKeySelectorApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapKeySelectorApplyConfiguration constructs an declarative configuration of the ConfigMapKeySelector type for use with +// ConfigMapKeySelectorApplyConfiguration constructs a declarative configuration of the ConfigMapKeySelector type for use with // apply. func ConfigMapKeySelector() *ConfigMapKeySelectorApplyConfiguration { return &ConfigMapKeySelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go index da9655a544..135bb7d427 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// ConfigMapNodeConfigSourceApplyConfiguration represents an declarative configuration of the ConfigMapNodeConfigSource type for use +// ConfigMapNodeConfigSourceApplyConfiguration represents a declarative configuration of the ConfigMapNodeConfigSource type for use // with apply. type ConfigMapNodeConfigSourceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -32,7 +32,7 @@ type ConfigMapNodeConfigSourceApplyConfiguration struct { KubeletConfigKey *string `json:"kubeletConfigKey,omitempty"` } -// ConfigMapNodeConfigSourceApplyConfiguration constructs an declarative configuration of the ConfigMapNodeConfigSource type for use with +// ConfigMapNodeConfigSourceApplyConfiguration constructs a declarative configuration of the ConfigMapNodeConfigSource type for use with // apply. 
func ConfigMapNodeConfigSource() *ConfigMapNodeConfigSourceApplyConfiguration { return &ConfigMapNodeConfigSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go index 7297d3a437..308b28f57d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapProjectionApplyConfiguration represents an declarative configuration of the ConfigMapProjection type for use +// ConfigMapProjectionApplyConfiguration represents a declarative configuration of the ConfigMapProjection type for use // with apply. type ConfigMapProjectionApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type ConfigMapProjectionApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapProjectionApplyConfiguration constructs an declarative configuration of the ConfigMapProjection type for use with +// ConfigMapProjectionApplyConfiguration constructs a declarative configuration of the ConfigMapProjection type for use with // apply. func ConfigMapProjection() *ConfigMapProjectionApplyConfiguration { return &ConfigMapProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go index deaebde319..8e0e8dc0f4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapVolumeSourceApplyConfiguration represents an declarative configuration of the ConfigMapVolumeSource type for use +// ConfigMapVolumeSourceApplyConfiguration represents a declarative configuration of the ConfigMapVolumeSource type for use // with apply. type ConfigMapVolumeSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -27,7 +27,7 @@ type ConfigMapVolumeSourceApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapVolumeSourceApplyConfiguration constructs an declarative configuration of the ConfigMapVolumeSource type for use with +// ConfigMapVolumeSourceApplyConfiguration constructs a declarative configuration of the ConfigMapVolumeSource type for use with // apply. func ConfigMapVolumeSource() *ConfigMapVolumeSourceApplyConfiguration { return &ConfigMapVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go index 32d7156063..eed5f7d027 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ContainerApplyConfiguration represents an declarative configuration of the Container type for use +// ContainerApplyConfiguration represents a declarative configuration of the Container type for use // with apply. 
type ContainerApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -51,7 +51,7 @@ type ContainerApplyConfiguration struct { TTY *bool `json:"tty,omitempty"` } -// ContainerApplyConfiguration constructs an declarative configuration of the Container type for use with +// ContainerApplyConfiguration constructs a declarative configuration of the Container type for use with // apply. func Container() *ContainerApplyConfiguration { return &ContainerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go index d5c874a7ce..bc9428fd10 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ContainerImageApplyConfiguration represents an declarative configuration of the ContainerImage type for use +// ContainerImageApplyConfiguration represents a declarative configuration of the ContainerImage type for use // with apply. type ContainerImageApplyConfiguration struct { Names []string `json:"names,omitempty"` SizeBytes *int64 `json:"sizeBytes,omitempty"` } -// ContainerImageApplyConfiguration constructs an declarative configuration of the ContainerImage type for use with +// ContainerImageApplyConfiguration constructs a declarative configuration of the ContainerImage type for use with // apply. func ContainerImage() *ContainerImageApplyConfiguration { return &ContainerImageApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go index a23ad9268a..7acc0638f2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerPortApplyConfiguration represents an declarative configuration of the ContainerPort type for use +// ContainerPortApplyConfiguration represents a declarative configuration of the ContainerPort type for use // with apply. type ContainerPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ContainerPortApplyConfiguration struct { HostIP *string `json:"hostIP,omitempty"` } -// ContainerPortApplyConfiguration constructs an declarative configuration of the ContainerPort type for use with +// ContainerPortApplyConfiguration constructs a declarative configuration of the ContainerPort type for use with // apply. func ContainerPort() *ContainerPortApplyConfiguration { return &ContainerPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go index bbbcbc9f13..ea60e3d987 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResizePolicyApplyConfiguration represents an declarative configuration of the ContainerResizePolicy type for use +// ContainerResizePolicyApplyConfiguration represents a declarative configuration of the ContainerResizePolicy type for use // with apply. 
type ContainerResizePolicyApplyConfiguration struct { ResourceName *v1.ResourceName `json:"resourceName,omitempty"` RestartPolicy *v1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"` } -// ContainerResizePolicyApplyConfiguration constructs an declarative configuration of the ContainerResizePolicy type for use with +// ContainerResizePolicyApplyConfiguration constructs a declarative configuration of the ContainerResizePolicy type for use with // apply. func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration { return &ContainerResizePolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go index 6cbfc7fd9b..b958e01774 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ContainerStateApplyConfiguration represents an declarative configuration of the ContainerState type for use +// ContainerStateApplyConfiguration represents a declarative configuration of the ContainerState type for use // with apply. type ContainerStateApplyConfiguration struct { Waiting *ContainerStateWaitingApplyConfiguration `json:"waiting,omitempty"` @@ -26,7 +26,7 @@ type ContainerStateApplyConfiguration struct { Terminated *ContainerStateTerminatedApplyConfiguration `json:"terminated,omitempty"` } -// ContainerStateApplyConfiguration constructs an declarative configuration of the ContainerState type for use with +// ContainerStateApplyConfiguration constructs a declarative configuration of the ContainerState type for use with // apply. func ContainerState() *ContainerStateApplyConfiguration { return &ContainerStateApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go index 6c1d7311e7..6eec9f7f2c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ContainerStateRunningApplyConfiguration represents an declarative configuration of the ContainerStateRunning type for use +// ContainerStateRunningApplyConfiguration represents a declarative configuration of the ContainerStateRunning type for use // with apply. type ContainerStateRunningApplyConfiguration struct { StartedAt *v1.Time `json:"startedAt,omitempty"` } -// ContainerStateRunningApplyConfiguration constructs an declarative configuration of the ContainerStateRunning type for use with +// ContainerStateRunningApplyConfiguration constructs a declarative configuration of the ContainerStateRunning type for use with // apply. 
func ContainerStateRunning() *ContainerStateRunningApplyConfiguration { return &ContainerStateRunningApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go index 0383c9dd9d..b067aa211e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ContainerStateTerminatedApplyConfiguration represents an declarative configuration of the ContainerStateTerminated type for use +// ContainerStateTerminatedApplyConfiguration represents a declarative configuration of the ContainerStateTerminated type for use // with apply. type ContainerStateTerminatedApplyConfiguration struct { ExitCode *int32 `json:"exitCode,omitempty"` @@ -34,7 +34,7 @@ type ContainerStateTerminatedApplyConfiguration struct { ContainerID *string `json:"containerID,omitempty"` } -// ContainerStateTerminatedApplyConfiguration constructs an declarative configuration of the ContainerStateTerminated type for use with +// ContainerStateTerminatedApplyConfiguration constructs a declarative configuration of the ContainerStateTerminated type for use with // apply. func ContainerStateTerminated() *ContainerStateTerminatedApplyConfiguration { return &ContainerStateTerminatedApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go index e51b778c0d..7756c7da03 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ContainerStateWaitingApplyConfiguration represents an declarative configuration of the ContainerStateWaiting type for use +// ContainerStateWaitingApplyConfiguration represents a declarative configuration of the ContainerStateWaiting type for use // with apply. type ContainerStateWaitingApplyConfiguration struct { Reason *string `json:"reason,omitempty"` Message *string `json:"message,omitempty"` } -// ContainerStateWaitingApplyConfiguration constructs an declarative configuration of the ContainerStateWaiting type for use with +// ContainerStateWaitingApplyConfiguration constructs a declarative configuration of the ContainerStateWaiting type for use with // apply. func ContainerStateWaiting() *ContainerStateWaitingApplyConfiguration { return &ContainerStateWaitingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go index e3f774bbb3..6a28939c2f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go @@ -22,24 +22,26 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ContainerStatusApplyConfiguration represents an declarative configuration of the ContainerStatus type for use +// ContainerStatusApplyConfiguration represents a declarative configuration of the ContainerStatus type for use // with apply. 
type ContainerStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - State *ContainerStateApplyConfiguration `json:"state,omitempty"` - LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` - Ready *bool `json:"ready,omitempty"` - RestartCount *int32 `json:"restartCount,omitempty"` - Image *string `json:"image,omitempty"` - ImageID *string `json:"imageID,omitempty"` - ContainerID *string `json:"containerID,omitempty"` - Started *bool `json:"started,omitempty"` - AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountStatusApplyConfiguration `json:"volumeMounts,omitempty"` -} - -// ContainerStatusApplyConfiguration constructs an declarative configuration of the ContainerStatus type for use with + Name *string `json:"name,omitempty"` + State *ContainerStateApplyConfiguration `json:"state,omitempty"` + LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` + Ready *bool `json:"ready,omitempty"` + RestartCount *int32 `json:"restartCount,omitempty"` + Image *string `json:"image,omitempty"` + ImageID *string `json:"imageID,omitempty"` + ContainerID *string `json:"containerID,omitempty"` + Started *bool `json:"started,omitempty"` + AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + VolumeMounts []VolumeMountStatusApplyConfiguration `json:"volumeMounts,omitempty"` + User *ContainerUserApplyConfiguration `json:"user,omitempty"` + AllocatedResourcesStatus []ResourceStatusApplyConfiguration `json:"allocatedResourcesStatus,omitempty"` +} + +// ContainerStatusApplyConfiguration constructs a declarative configuration of the ContainerStatus type for use with // apply. func ContainerStatus() *ContainerStatusApplyConfiguration { return &ContainerStatusApplyConfiguration{} @@ -145,3 +147,24 @@ func (b *ContainerStatusApplyConfiguration) WithVolumeMounts(values ...*VolumeMo } return b } + +// WithUser sets the User field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the User field is set to the value of the last call. +func (b *ContainerStatusApplyConfiguration) WithUser(value *ContainerUserApplyConfiguration) *ContainerStatusApplyConfiguration { + b.User = value + return b +} + +// WithAllocatedResourcesStatus adds the given value to the AllocatedResourcesStatus field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllocatedResourcesStatus field. 
+func (b *ContainerStatusApplyConfiguration) WithAllocatedResourcesStatus(values ...*ResourceStatusApplyConfiguration) *ContainerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAllocatedResourcesStatus") + } + b.AllocatedResourcesStatus = append(b.AllocatedResourcesStatus, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go new file mode 100644 index 0000000000..34ec8e4146 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ContainerUserApplyConfiguration represents a declarative configuration of the ContainerUser type for use +// with apply. +type ContainerUserApplyConfiguration struct { + Linux *LinuxContainerUserApplyConfiguration `json:"linux,omitempty"` +} + +// ContainerUserApplyConfiguration constructs a declarative configuration of the ContainerUser type for use with +// apply. +func ContainerUser() *ContainerUserApplyConfiguration { + return &ContainerUserApplyConfiguration{} +} + +// WithLinux sets the Linux field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Linux field is set to the value of the last call. +func (b *ContainerUserApplyConfiguration) WithLinux(value *LinuxContainerUserApplyConfiguration) *ContainerUserApplyConfiguration { + b.Linux = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go index 2fc681604e..a614d10805 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSIPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CSIPersistentVolumeSource type for use +// CSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CSIPersistentVolumeSource type for use // with apply. type CSIPersistentVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -33,7 +33,7 @@ type CSIPersistentVolumeSourceApplyConfiguration struct { NodeExpandSecretRef *SecretReferenceApplyConfiguration `json:"nodeExpandSecretRef,omitempty"` } -// CSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIPersistentVolumeSource type for use with +// CSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CSIPersistentVolumeSource type for use with // apply. 
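The containerstatus.go and containeruser.go hunks above add two new ContainerStatus fields, a container user identity and per-resource allocation status, plus their generated builders. A minimal sketch of how the new builders chain, assuming the package is imported as corev1ac; the name, UID/GID values, and the WithName/WithReady/ResourceStatus calls for pre-existing generated APIs are illustrative assumptions based on the standard applyconfiguration-gen pattern, not part of this diff:

package example

import (
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// containerStatusSketch exercises the newly generated WithUser and
// WithAllocatedResourcesStatus builders shown in the hunks above.
func containerStatusSketch() *corev1ac.ContainerStatusApplyConfiguration {
	return corev1ac.ContainerStatus().
		WithName("app").
		WithReady(true).
		WithUser(corev1ac.ContainerUser().
			WithLinux(corev1ac.LinuxContainerUser().
				WithUID(1000).
				WithGID(1000))).
		// WithAllocatedResourcesStatus appends, so repeated calls accumulate entries.
		WithAllocatedResourcesStatus(corev1ac.ResourceStatus())
}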
func CSIPersistentVolumeSource() *CSIPersistentVolumeSourceApplyConfiguration { return &CSIPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go index c2a32df8d0..b58d9bbb4b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSIVolumeSourceApplyConfiguration represents an declarative configuration of the CSIVolumeSource type for use +// CSIVolumeSourceApplyConfiguration represents a declarative configuration of the CSIVolumeSource type for use // with apply. type CSIVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type CSIVolumeSourceApplyConfiguration struct { NodePublishSecretRef *LocalObjectReferenceApplyConfiguration `json:"nodePublishSecretRef,omitempty"` } -// CSIVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIVolumeSource type for use with +// CSIVolumeSourceApplyConfiguration constructs a declarative configuration of the CSIVolumeSource type for use with // apply. func CSIVolumeSource() *CSIVolumeSourceApplyConfiguration { return &CSIVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go index 13a2e948f1..5be27ec0c5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// DaemonEndpointApplyConfiguration represents an declarative configuration of the DaemonEndpoint type for use +// DaemonEndpointApplyConfiguration represents a declarative configuration of the DaemonEndpoint type for use // with apply. type DaemonEndpointApplyConfiguration struct { Port *int32 `json:"Port,omitempty"` } -// DaemonEndpointApplyConfiguration constructs an declarative configuration of the DaemonEndpoint type for use with +// DaemonEndpointApplyConfiguration constructs a declarative configuration of the DaemonEndpoint type for use with // apply. func DaemonEndpoint() *DaemonEndpointApplyConfiguration { return &DaemonEndpointApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go index f88a87c0b5..ed6b8b1bbe 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// DownwardAPIProjectionApplyConfiguration represents an declarative configuration of the DownwardAPIProjection type for use +// DownwardAPIProjectionApplyConfiguration represents a declarative configuration of the DownwardAPIProjection type for use // with apply. 
type DownwardAPIProjectionApplyConfiguration struct { Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"` } -// DownwardAPIProjectionApplyConfiguration constructs an declarative configuration of the DownwardAPIProjection type for use with +// DownwardAPIProjectionApplyConfiguration constructs a declarative configuration of the DownwardAPIProjection type for use with // apply. func DownwardAPIProjection() *DownwardAPIProjectionApplyConfiguration { return &DownwardAPIProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go index b25ff25fa9..ec9d013dd9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// DownwardAPIVolumeFileApplyConfiguration represents an declarative configuration of the DownwardAPIVolumeFile type for use +// DownwardAPIVolumeFileApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeFile type for use // with apply. type DownwardAPIVolumeFileApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -27,7 +27,7 @@ type DownwardAPIVolumeFileApplyConfiguration struct { Mode *int32 `json:"mode,omitempty"` } -// DownwardAPIVolumeFileApplyConfiguration constructs an declarative configuration of the DownwardAPIVolumeFile type for use with +// DownwardAPIVolumeFileApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeFile type for use with // apply. func DownwardAPIVolumeFile() *DownwardAPIVolumeFileApplyConfiguration { return &DownwardAPIVolumeFileApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go index 6913bb5218..eef9d7ef8d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// DownwardAPIVolumeSourceApplyConfiguration represents an declarative configuration of the DownwardAPIVolumeSource type for use +// DownwardAPIVolumeSourceApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeSource type for use // with apply. type DownwardAPIVolumeSourceApplyConfiguration struct { Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"` DefaultMode *int32 `json:"defaultMode,omitempty"` } -// DownwardAPIVolumeSourceApplyConfiguration constructs an declarative configuration of the DownwardAPIVolumeSource type for use with +// DownwardAPIVolumeSourceApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeSource type for use with // apply. 
func DownwardAPIVolumeSource() *DownwardAPIVolumeSourceApplyConfiguration { return &DownwardAPIVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go index 021280daf6..a619fdb074 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go @@ -23,14 +23,14 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// EmptyDirVolumeSourceApplyConfiguration represents an declarative configuration of the EmptyDirVolumeSource type for use +// EmptyDirVolumeSourceApplyConfiguration represents a declarative configuration of the EmptyDirVolumeSource type for use // with apply. type EmptyDirVolumeSourceApplyConfiguration struct { Medium *v1.StorageMedium `json:"medium,omitempty"` SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"` } -// EmptyDirVolumeSourceApplyConfiguration constructs an declarative configuration of the EmptyDirVolumeSource type for use with +// EmptyDirVolumeSourceApplyConfiguration constructs a declarative configuration of the EmptyDirVolumeSource type for use with // apply. func EmptyDirVolumeSource() *EmptyDirVolumeSourceApplyConfiguration { return &EmptyDirVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go index 52a54b6008..536e697a9a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointAddressApplyConfiguration represents an declarative configuration of the EndpointAddress type for use +// EndpointAddressApplyConfiguration represents a declarative configuration of the EndpointAddress type for use // with apply. type EndpointAddressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -27,7 +27,7 @@ type EndpointAddressApplyConfiguration struct { TargetRef *ObjectReferenceApplyConfiguration `json:"targetRef,omitempty"` } -// EndpointAddressApplyConfiguration constructs an declarative configuration of the EndpointAddress type for use with +// EndpointAddressApplyConfiguration constructs a declarative configuration of the EndpointAddress type for use with // apply. func EndpointAddress() *EndpointAddressApplyConfiguration { return &EndpointAddressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go index cc00d0e491..d0d96230ce 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. 
type EndpointPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct { AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go index b98fed0858..98dc69aaab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointsApplyConfiguration represents an declarative configuration of the Endpoints type for use +// EndpointsApplyConfiguration represents a declarative configuration of the Endpoints type for use // with apply. type EndpointsApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type EndpointsApplyConfiguration struct { Subsets []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"` } -// Endpoints constructs an declarative configuration of the Endpoints type for use with +// Endpoints constructs a declarative configuration of the Endpoints type for use with // apply. func Endpoints(name, namespace string) *EndpointsApplyConfiguration { b := &EndpointsApplyConfiguration{} @@ -252,3 +252,9 @@ func (b *EndpointsApplyConfiguration) WithSubsets(values ...*EndpointSubsetApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EndpointsApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go index cd0657a80c..33cd8496a7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointSubsetApplyConfiguration represents an declarative configuration of the EndpointSubset type for use +// EndpointSubsetApplyConfiguration represents a declarative configuration of the EndpointSubset type for use // with apply. type EndpointSubsetApplyConfiguration struct { Addresses []EndpointAddressApplyConfiguration `json:"addresses,omitempty"` @@ -26,7 +26,7 @@ type EndpointSubsetApplyConfiguration struct { Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSubsetApplyConfiguration constructs an declarative configuration of the EndpointSubset type for use with +// EndpointSubsetApplyConfiguration constructs a declarative configuration of the EndpointSubset type for use with // apply. 
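Besides the comment fixes, this bump generates a GetName accessor on the top-level apply configurations (Endpoints above; Event and LimitRange further down follow the same pattern), so callers can read back the name recorded by the constructor. A minimal sketch, reusing the assumed corev1ac alias from the earlier example:

package example

import (
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// endpointsNameSketch reads back, via the new GetName getter, the name that the
// Endpoints constructor stored in the embedded ObjectMeta apply configuration.
func endpointsNameSketch() string {
	ep := corev1ac.Endpoints("my-svc", "default")
	if name := ep.GetName(); name != nil {
		return *name // "my-svc"
	}
	return ""
}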
func EndpointSubset() *EndpointSubsetApplyConfiguration { return &EndpointSubsetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go index 9e46d25ded..7aa181cf1a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EnvFromSourceApplyConfiguration represents an declarative configuration of the EnvFromSource type for use +// EnvFromSourceApplyConfiguration represents a declarative configuration of the EnvFromSource type for use // with apply. type EnvFromSourceApplyConfiguration struct { Prefix *string `json:"prefix,omitempty"` @@ -26,7 +26,7 @@ type EnvFromSourceApplyConfiguration struct { SecretRef *SecretEnvSourceApplyConfiguration `json:"secretRef,omitempty"` } -// EnvFromSourceApplyConfiguration constructs an declarative configuration of the EnvFromSource type for use with +// EnvFromSourceApplyConfiguration constructs a declarative configuration of the EnvFromSource type for use with // apply. func EnvFromSource() *EnvFromSourceApplyConfiguration { return &EnvFromSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go index a83528a28e..5894166ca4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EnvVarApplyConfiguration represents an declarative configuration of the EnvVar type for use +// EnvVarApplyConfiguration represents a declarative configuration of the EnvVar type for use // with apply. type EnvVarApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -26,7 +26,7 @@ type EnvVarApplyConfiguration struct { ValueFrom *EnvVarSourceApplyConfiguration `json:"valueFrom,omitempty"` } -// EnvVarApplyConfiguration constructs an declarative configuration of the EnvVar type for use with +// EnvVarApplyConfiguration constructs a declarative configuration of the EnvVar type for use with // apply. func EnvVar() *EnvVarApplyConfiguration { return &EnvVarApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go index 70c695bd5b..a3a55ea7af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EnvVarSourceApplyConfiguration represents an declarative configuration of the EnvVarSource type for use +// EnvVarSourceApplyConfiguration represents a declarative configuration of the EnvVarSource type for use // with apply. type EnvVarSourceApplyConfiguration struct { FieldRef *ObjectFieldSelectorApplyConfiguration `json:"fieldRef,omitempty"` @@ -27,7 +27,7 @@ type EnvVarSourceApplyConfiguration struct { SecretKeyRef *SecretKeySelectorApplyConfiguration `json:"secretKeyRef,omitempty"` } -// EnvVarSourceApplyConfiguration constructs an declarative configuration of the EnvVarSource type for use with +// EnvVarSourceApplyConfiguration constructs a declarative configuration of the EnvVarSource type for use with // apply. 
func EnvVarSource() *EnvVarSourceApplyConfiguration { return &EnvVarSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go index 5fa79a246e..a15ac6ec34 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go @@ -22,14 +22,14 @@ import ( corev1 "k8s.io/api/core/v1" ) -// EphemeralContainerApplyConfiguration represents an declarative configuration of the EphemeralContainer type for use +// EphemeralContainerApplyConfiguration represents a declarative configuration of the EphemeralContainer type for use // with apply. type EphemeralContainerApplyConfiguration struct { EphemeralContainerCommonApplyConfiguration `json:",inline"` TargetContainerName *string `json:"targetContainerName,omitempty"` } -// EphemeralContainerApplyConfiguration constructs an declarative configuration of the EphemeralContainer type for use with +// EphemeralContainerApplyConfiguration constructs a declarative configuration of the EphemeralContainer type for use with // apply. func EphemeralContainer() *EphemeralContainerApplyConfiguration { return &EphemeralContainerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go index 8cded29a9e..d5d13d27a0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// EphemeralContainerCommonApplyConfiguration represents an declarative configuration of the EphemeralContainerCommon type for use +// EphemeralContainerCommonApplyConfiguration represents a declarative configuration of the EphemeralContainerCommon type for use // with apply. type EphemeralContainerCommonApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -51,7 +51,7 @@ type EphemeralContainerCommonApplyConfiguration struct { TTY *bool `json:"tty,omitempty"` } -// EphemeralContainerCommonApplyConfiguration constructs an declarative configuration of the EphemeralContainerCommon type for use with +// EphemeralContainerCommonApplyConfiguration constructs a declarative configuration of the EphemeralContainerCommon type for use with // apply. func EphemeralContainerCommon() *EphemeralContainerCommonApplyConfiguration { return &EphemeralContainerCommonApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go index 31859404cc..d2c8c6722e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// EphemeralVolumeSourceApplyConfiguration represents an declarative configuration of the EphemeralVolumeSource type for use +// EphemeralVolumeSourceApplyConfiguration represents a declarative configuration of the EphemeralVolumeSource type for use // with apply. 
type EphemeralVolumeSourceApplyConfiguration struct { VolumeClaimTemplate *PersistentVolumeClaimTemplateApplyConfiguration `json:"volumeClaimTemplate,omitempty"` } -// EphemeralVolumeSourceApplyConfiguration constructs an declarative configuration of the EphemeralVolumeSource type for use with +// EphemeralVolumeSourceApplyConfiguration constructs a declarative configuration of the EphemeralVolumeSource type for use with // apply. func EphemeralVolumeSource() *EphemeralVolumeSourceApplyConfiguration { return &EphemeralVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go index 60aff6b5b2..65d6577ab6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -48,7 +48,7 @@ type EventApplyConfiguration struct { ReportingInstance *string `json:"reportingInstance,omitempty"` } -// Event constructs an declarative configuration of the Event type for use with +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -364,3 +364,9 @@ func (b *EventApplyConfiguration) WithReportingInstance(value string) *EventAppl b.ReportingInstance = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go index e66fb41271..18069c0d1b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply. func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go index 2eb4aa8e44..97edb04931 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// EventSourceApplyConfiguration represents an declarative configuration of the EventSource type for use +// EventSourceApplyConfiguration represents a declarative configuration of the EventSource type for use // with apply. type EventSourceApplyConfiguration struct { Component *string `json:"component,omitempty"` Host *string `json:"host,omitempty"` } -// EventSourceApplyConfiguration constructs an declarative configuration of the EventSource type for use with +// EventSourceApplyConfiguration constructs a declarative configuration of the EventSource type for use with // apply. func EventSource() *EventSourceApplyConfiguration { return &EventSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go index 1df52144d7..b7208a91cf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ExecActionApplyConfiguration represents an declarative configuration of the ExecAction type for use +// ExecActionApplyConfiguration represents a declarative configuration of the ExecAction type for use // with apply. type ExecActionApplyConfiguration struct { Command []string `json:"command,omitempty"` } -// ExecActionApplyConfiguration constructs an declarative configuration of the ExecAction type for use with +// ExecActionApplyConfiguration constructs a declarative configuration of the ExecAction type for use with // apply. func ExecAction() *ExecActionApplyConfiguration { return &ExecActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go index 43069de9a6..000ff2cc62 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FCVolumeSourceApplyConfiguration represents an declarative configuration of the FCVolumeSource type for use +// FCVolumeSourceApplyConfiguration represents a declarative configuration of the FCVolumeSource type for use // with apply. type FCVolumeSourceApplyConfiguration struct { TargetWWNs []string `json:"targetWWNs,omitempty"` @@ -28,7 +28,7 @@ type FCVolumeSourceApplyConfiguration struct { WWIDs []string `json:"wwids,omitempty"` } -// FCVolumeSourceApplyConfiguration constructs an declarative configuration of the FCVolumeSource type for use with +// FCVolumeSourceApplyConfiguration constructs a declarative configuration of the FCVolumeSource type for use with // apply. func FCVolumeSource() *FCVolumeSourceApplyConfiguration { return &FCVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go index 47e7c746ee..355c2c82d0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// FlexPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the FlexPersistentVolumeSource type for use +// FlexPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the FlexPersistentVolumeSource type for use // with apply. type FlexPersistentVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type FlexPersistentVolumeSourceApplyConfiguration struct { Options map[string]string `json:"options,omitempty"` } -// FlexPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the FlexPersistentVolumeSource type for use with +// FlexPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexPersistentVolumeSource type for use with // apply. func FlexPersistentVolumeSource() *FlexPersistentVolumeSourceApplyConfiguration { return &FlexPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go index 7c09516a98..08ae9e1bea 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FlexVolumeSourceApplyConfiguration represents an declarative configuration of the FlexVolumeSource type for use +// FlexVolumeSourceApplyConfiguration represents a declarative configuration of the FlexVolumeSource type for use // with apply. type FlexVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type FlexVolumeSourceApplyConfiguration struct { Options map[string]string `json:"options,omitempty"` } -// FlexVolumeSourceApplyConfiguration constructs an declarative configuration of the FlexVolumeSource type for use with +// FlexVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexVolumeSource type for use with // apply. func FlexVolumeSource() *FlexVolumeSourceApplyConfiguration { return &FlexVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go index 74896d55ac..e4ecbba0e4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// FlockerVolumeSourceApplyConfiguration represents an declarative configuration of the FlockerVolumeSource type for use +// FlockerVolumeSourceApplyConfiguration represents a declarative configuration of the FlockerVolumeSource type for use // with apply. type FlockerVolumeSourceApplyConfiguration struct { DatasetName *string `json:"datasetName,omitempty"` DatasetUUID *string `json:"datasetUUID,omitempty"` } -// FlockerVolumeSourceApplyConfiguration constructs an declarative configuration of the FlockerVolumeSource type for use with +// FlockerVolumeSourceApplyConfiguration constructs a declarative configuration of the FlockerVolumeSource type for use with // apply. 
func FlockerVolumeSource() *FlockerVolumeSourceApplyConfiguration { return &FlockerVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go index 0869d3eaa6..56c4d03fa2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GCEPersistentDiskVolumeSourceApplyConfiguration represents an declarative configuration of the GCEPersistentDiskVolumeSource type for use +// GCEPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the GCEPersistentDiskVolumeSource type for use // with apply. type GCEPersistentDiskVolumeSourceApplyConfiguration struct { PDName *string `json:"pdName,omitempty"` @@ -27,7 +27,7 @@ type GCEPersistentDiskVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// GCEPersistentDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the GCEPersistentDiskVolumeSource type for use with +// GCEPersistentDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the GCEPersistentDiskVolumeSource type for use with // apply. func GCEPersistentDiskVolumeSource() *GCEPersistentDiskVolumeSourceApplyConfiguration { return &GCEPersistentDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go index 825e02e4e4..4ed92317c8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GitRepoVolumeSourceApplyConfiguration represents an declarative configuration of the GitRepoVolumeSource type for use +// GitRepoVolumeSourceApplyConfiguration represents a declarative configuration of the GitRepoVolumeSource type for use // with apply. type GitRepoVolumeSourceApplyConfiguration struct { Repository *string `json:"repository,omitempty"` @@ -26,7 +26,7 @@ type GitRepoVolumeSourceApplyConfiguration struct { Directory *string `json:"directory,omitempty"` } -// GitRepoVolumeSourceApplyConfiguration constructs an declarative configuration of the GitRepoVolumeSource type for use with +// GitRepoVolumeSourceApplyConfiguration constructs a declarative configuration of the GitRepoVolumeSource type for use with // apply. func GitRepoVolumeSource() *GitRepoVolumeSourceApplyConfiguration { return &GitRepoVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go index 21a3925e52..c9a23ca5d7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// GlusterfsPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the GlusterfsPersistentVolumeSource type for use +// GlusterfsPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsPersistentVolumeSource type for use // with apply. type GlusterfsPersistentVolumeSourceApplyConfiguration struct { EndpointsName *string `json:"endpoints,omitempty"` @@ -27,7 +27,7 @@ type GlusterfsPersistentVolumeSourceApplyConfiguration struct { EndpointsNamespace *string `json:"endpointsNamespace,omitempty"` } -// GlusterfsPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the GlusterfsPersistentVolumeSource type for use with +// GlusterfsPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the GlusterfsPersistentVolumeSource type for use with // apply. func GlusterfsPersistentVolumeSource() *GlusterfsPersistentVolumeSourceApplyConfiguration { return &GlusterfsPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go index 7ce6f0b399..8c27f8c70d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GlusterfsVolumeSourceApplyConfiguration represents an declarative configuration of the GlusterfsVolumeSource type for use +// GlusterfsVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsVolumeSource type for use // with apply. type GlusterfsVolumeSourceApplyConfiguration struct { EndpointsName *string `json:"endpoints,omitempty"` @@ -26,7 +26,7 @@ type GlusterfsVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// GlusterfsVolumeSourceApplyConfiguration constructs an declarative configuration of the GlusterfsVolumeSource type for use with +// GlusterfsVolumeSourceApplyConfiguration constructs a declarative configuration of the GlusterfsVolumeSource type for use with // apply. func GlusterfsVolumeSource() *GlusterfsVolumeSourceApplyConfiguration { return &GlusterfsVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go index f94e55937a..0f3a886714 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// GRPCActionApplyConfiguration represents an declarative configuration of the GRPCAction type for use +// GRPCActionApplyConfiguration represents a declarative configuration of the GRPCAction type for use // with apply. type GRPCActionApplyConfiguration struct { Port *int32 `json:"port,omitempty"` Service *string `json:"service,omitempty"` } -// GRPCActionApplyConfiguration constructs an declarative configuration of the GRPCAction type for use with +// GRPCActionApplyConfiguration constructs a declarative configuration of the GRPCAction type for use with // apply. 
func GRPCAction() *GRPCActionApplyConfiguration { return &GRPCActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go index 861508ef53..ec9ea17413 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// HostAliasApplyConfiguration represents an declarative configuration of the HostAlias type for use +// HostAliasApplyConfiguration represents a declarative configuration of the HostAlias type for use // with apply. type HostAliasApplyConfiguration struct { IP *string `json:"ip,omitempty"` Hostnames []string `json:"hostnames,omitempty"` } -// HostAliasApplyConfiguration constructs an declarative configuration of the HostAlias type for use with +// HostAliasApplyConfiguration constructs a declarative configuration of the HostAlias type for use with // apply. func HostAlias() *HostAliasApplyConfiguration { return &HostAliasApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go index c2a42cf747..439b5ce2d6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// HostIPApplyConfiguration represents an declarative configuration of the HostIP type for use +// HostIPApplyConfiguration represents a declarative configuration of the HostIP type for use // with apply. type HostIPApplyConfiguration struct { IP *string `json:"ip,omitempty"` } -// HostIPApplyConfiguration constructs an declarative configuration of the HostIP type for use with +// HostIPApplyConfiguration constructs a declarative configuration of the HostIP type for use with // apply. func HostIP() *HostIPApplyConfiguration { return &HostIPApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go index 8b15689eef..10dfedfdef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// HostPathVolumeSourceApplyConfiguration represents an declarative configuration of the HostPathVolumeSource type for use +// HostPathVolumeSourceApplyConfiguration represents a declarative configuration of the HostPathVolumeSource type for use // with apply. type HostPathVolumeSourceApplyConfiguration struct { Path *string `json:"path,omitempty"` Type *v1.HostPathType `json:"type,omitempty"` } -// HostPathVolumeSourceApplyConfiguration constructs an declarative configuration of the HostPathVolumeSource type for use with +// HostPathVolumeSourceApplyConfiguration constructs a declarative configuration of the HostPathVolumeSource type for use with // apply. 
func HostPathVolumeSource() *HostPathVolumeSourceApplyConfiguration { return &HostPathVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go index e4ecdd4303..5ecbc27fea 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// HTTPGetActionApplyConfiguration represents an declarative configuration of the HTTPGetAction type for use +// HTTPGetActionApplyConfiguration represents a declarative configuration of the HTTPGetAction type for use // with apply. type HTTPGetActionApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -33,7 +33,7 @@ type HTTPGetActionApplyConfiguration struct { HTTPHeaders []HTTPHeaderApplyConfiguration `json:"httpHeaders,omitempty"` } -// HTTPGetActionApplyConfiguration constructs an declarative configuration of the HTTPGetAction type for use with +// HTTPGetActionApplyConfiguration constructs a declarative configuration of the HTTPGetAction type for use with // apply. func HTTPGetAction() *HTTPGetActionApplyConfiguration { return &HTTPGetActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go index d55f36bfd2..2526371669 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// HTTPHeaderApplyConfiguration represents an declarative configuration of the HTTPHeader type for use +// HTTPHeaderApplyConfiguration represents a declarative configuration of the HTTPHeader type for use // with apply. type HTTPHeaderApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// HTTPHeaderApplyConfiguration constructs an declarative configuration of the HTTPHeader type for use with +// HTTPHeaderApplyConfiguration constructs a declarative configuration of the HTTPHeader type for use with // apply. func HTTPHeader() *HTTPHeaderApplyConfiguration { return &HTTPHeaderApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go new file mode 100644 index 0000000000..340f150400 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ImageVolumeSourceApplyConfiguration represents a declarative configuration of the ImageVolumeSource type for use +// with apply. 
+type ImageVolumeSourceApplyConfiguration struct { + Reference *string `json:"reference,omitempty"` + PullPolicy *v1.PullPolicy `json:"pullPolicy,omitempty"` +} + +// ImageVolumeSourceApplyConfiguration constructs a declarative configuration of the ImageVolumeSource type for use with +// apply. +func ImageVolumeSource() *ImageVolumeSourceApplyConfiguration { + return &ImageVolumeSourceApplyConfiguration{} +} + +// WithReference sets the Reference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reference field is set to the value of the last call. +func (b *ImageVolumeSourceApplyConfiguration) WithReference(value string) *ImageVolumeSourceApplyConfiguration { + b.Reference = &value + return b +} + +// WithPullPolicy sets the PullPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PullPolicy field is set to the value of the last call. +func (b *ImageVolumeSourceApplyConfiguration) WithPullPolicy(value v1.PullPolicy) *ImageVolumeSourceApplyConfiguration { + b.PullPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go index c7b248181a..42f420c568 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ISCSIPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the ISCSIPersistentVolumeSource type for use +// ISCSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIPersistentVolumeSource type for use // with apply. type ISCSIPersistentVolumeSourceApplyConfiguration struct { TargetPortal *string `json:"targetPortal,omitempty"` @@ -34,7 +34,7 @@ type ISCSIPersistentVolumeSourceApplyConfiguration struct { InitiatorName *string `json:"initiatorName,omitempty"` } -// ISCSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the ISCSIPersistentVolumeSource type for use with +// ISCSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIPersistentVolumeSource type for use with // apply. func ISCSIPersistentVolumeSource() *ISCSIPersistentVolumeSourceApplyConfiguration { return &ISCSIPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go index c95941a9c7..61055434bc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ISCSIVolumeSourceApplyConfiguration represents an declarative configuration of the ISCSIVolumeSource type for use +// ISCSIVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIVolumeSource type for use // with apply. 
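imagevolumesource.go above is one of the new generated files in this bump; it tracks the image volume source introduced in Kubernetes 1.31. A minimal sketch of the builder, again assuming the corev1 and corev1ac aliases; the registry reference is illustrative, and in a pod spec the result is consumed through the matching Image field on the volume apply configurations elsewhere in this package:

package example

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// imageVolumeSketch builds the new ImageVolumeSource apply configuration:
// an OCI image reference plus a standard pull policy.
func imageVolumeSketch() *corev1ac.ImageVolumeSourceApplyConfiguration {
	return corev1ac.ImageVolumeSource().
		WithReference("registry.example.com/team/data:1.0").
		WithPullPolicy(corev1.PullIfNotPresent)
}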
type ISCSIVolumeSourceApplyConfiguration struct { TargetPortal *string `json:"targetPortal,omitempty"` @@ -34,7 +34,7 @@ type ISCSIVolumeSourceApplyConfiguration struct { InitiatorName *string `json:"initiatorName,omitempty"` } -// ISCSIVolumeSourceApplyConfiguration constructs an declarative configuration of the ISCSIVolumeSource type for use with +// ISCSIVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIVolumeSource type for use with // apply. func ISCSIVolumeSource() *ISCSIVolumeSourceApplyConfiguration { return &ISCSIVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go index d58676d34c..c961b07955 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// KeyToPathApplyConfiguration represents an declarative configuration of the KeyToPath type for use +// KeyToPathApplyConfiguration represents a declarative configuration of the KeyToPath type for use // with apply. type KeyToPathApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -26,7 +26,7 @@ type KeyToPathApplyConfiguration struct { Mode *int32 `json:"mode,omitempty"` } -// KeyToPathApplyConfiguration constructs an declarative configuration of the KeyToPath type for use with +// KeyToPathApplyConfiguration constructs a declarative configuration of the KeyToPath type for use with // apply. func KeyToPath() *KeyToPathApplyConfiguration { return &KeyToPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go index db9abf8af7..e37a30f597 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// LifecycleApplyConfiguration represents an declarative configuration of the Lifecycle type for use +// LifecycleApplyConfiguration represents a declarative configuration of the Lifecycle type for use // with apply. type LifecycleApplyConfiguration struct { PostStart *LifecycleHandlerApplyConfiguration `json:"postStart,omitempty"` PreStop *LifecycleHandlerApplyConfiguration `json:"preStop,omitempty"` } -// LifecycleApplyConfiguration constructs an declarative configuration of the Lifecycle type for use with +// LifecycleApplyConfiguration constructs a declarative configuration of the Lifecycle type for use with // apply. func Lifecycle() *LifecycleApplyConfiguration { return &LifecycleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go index e4ae9c49f7..b7c706d58d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// LifecycleHandlerApplyConfiguration represents an declarative configuration of the LifecycleHandler type for use +// LifecycleHandlerApplyConfiguration represents a declarative configuration of the LifecycleHandler type for use // with apply. 
type LifecycleHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` @@ -27,7 +27,7 @@ type LifecycleHandlerApplyConfiguration struct { Sleep *SleepActionApplyConfiguration `json:"sleep,omitempty"` } -// LifecycleHandlerApplyConfiguration constructs an declarative configuration of the LifecycleHandler type for use with +// LifecycleHandlerApplyConfiguration constructs a declarative configuration of the LifecycleHandler type for use with // apply. func LifecycleHandler() *LifecycleHandlerApplyConfiguration { return &LifecycleHandlerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go index eaf635c76a..7770200a0a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LimitRangeApplyConfiguration represents an declarative configuration of the LimitRange type for use +// LimitRangeApplyConfiguration represents a declarative configuration of the LimitRange type for use // with apply. type LimitRangeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type LimitRangeApplyConfiguration struct { Spec *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"` } -// LimitRange constructs an declarative configuration of the LimitRange type for use with +// LimitRange constructs a declarative configuration of the LimitRange type for use with // apply. func LimitRange(name, namespace string) *LimitRangeApplyConfiguration { b := &LimitRangeApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *LimitRangeApplyConfiguration) WithSpec(value *LimitRangeSpecApplyConfig b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LimitRangeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go index 084650fdaa..61d8344e80 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// LimitRangeItemApplyConfiguration represents an declarative configuration of the LimitRangeItem type for use +// LimitRangeItemApplyConfiguration represents a declarative configuration of the LimitRangeItem type for use // with apply. type LimitRangeItemApplyConfiguration struct { Type *v1.LimitType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type LimitRangeItemApplyConfiguration struct { MaxLimitRequestRatio *v1.ResourceList `json:"maxLimitRequestRatio,omitempty"` } -// LimitRangeItemApplyConfiguration constructs an declarative configuration of the LimitRangeItem type for use with +// LimitRangeItemApplyConfiguration constructs a declarative configuration of the LimitRangeItem type for use with // apply. 
func LimitRangeItem() *LimitRangeItemApplyConfiguration { return &LimitRangeItemApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go index 5eee5c498e..8d69c1c0cd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// LimitRangeSpecApplyConfiguration represents an declarative configuration of the LimitRangeSpec type for use +// LimitRangeSpecApplyConfiguration represents a declarative configuration of the LimitRangeSpec type for use // with apply. type LimitRangeSpecApplyConfiguration struct { Limits []LimitRangeItemApplyConfiguration `json:"limits,omitempty"` } -// LimitRangeSpecApplyConfiguration constructs an declarative configuration of the LimitRangeSpec type for use with +// LimitRangeSpecApplyConfiguration constructs a declarative configuration of the LimitRangeSpec type for use with // apply. func LimitRangeSpec() *LimitRangeSpecApplyConfiguration { return &LimitRangeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go new file mode 100644 index 0000000000..fbab4815ab --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// LinuxContainerUserApplyConfiguration represents a declarative configuration of the LinuxContainerUser type for use +// with apply. +type LinuxContainerUserApplyConfiguration struct { + UID *int64 `json:"uid,omitempty"` + GID *int64 `json:"gid,omitempty"` + SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` +} + +// LinuxContainerUserApplyConfiguration constructs a declarative configuration of the LinuxContainerUser type for use with +// apply. +func LinuxContainerUser() *LinuxContainerUserApplyConfiguration { + return &LinuxContainerUserApplyConfiguration{} +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *LinuxContainerUserApplyConfiguration) WithUID(value int64) *LinuxContainerUserApplyConfiguration { + b.UID = &value + return b +} + +// WithGID sets the GID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GID field is set to the value of the last call. 
+func (b *LinuxContainerUserApplyConfiguration) WithGID(value int64) *LinuxContainerUserApplyConfiguration { + b.GID = &value + return b +} + +// WithSupplementalGroups adds the given value to the SupplementalGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the SupplementalGroups field. +func (b *LinuxContainerUserApplyConfiguration) WithSupplementalGroups(values ...int64) *LinuxContainerUserApplyConfiguration { + for i := range values { + b.SupplementalGroups = append(b.SupplementalGroups, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go index a48dac6810..1a7d998152 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// LoadBalancerIngressApplyConfiguration represents an declarative configuration of the LoadBalancerIngress type for use +// LoadBalancerIngressApplyConfiguration represents a declarative configuration of the LoadBalancerIngress type for use // with apply. type LoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -31,7 +31,7 @@ type LoadBalancerIngressApplyConfiguration struct { Ports []PortStatusApplyConfiguration `json:"ports,omitempty"` } -// LoadBalancerIngressApplyConfiguration constructs an declarative configuration of the LoadBalancerIngress type for use with +// LoadBalancerIngressApplyConfiguration constructs a declarative configuration of the LoadBalancerIngress type for use with // apply. func LoadBalancerIngress() *LoadBalancerIngressApplyConfiguration { return &LoadBalancerIngressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go index 2fcc0cad18..bb3d616c15 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// LoadBalancerStatusApplyConfiguration represents an declarative configuration of the LoadBalancerStatus type for use +// LoadBalancerStatusApplyConfiguration represents a declarative configuration of the LoadBalancerStatus type for use // with apply. type LoadBalancerStatusApplyConfiguration struct { Ingress []LoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// LoadBalancerStatusApplyConfiguration constructs an declarative configuration of the LoadBalancerStatus type for use with +// LoadBalancerStatusApplyConfiguration constructs a declarative configuration of the LoadBalancerStatus type for use with // apply. 
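Editor's note: LinuxContainerUser above is another builder newly generated in this bump. A short sketch of how its setters chain, with illustrative UID/GID values only:

```go
package main

import (
	"fmt"

	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Illustrative values only; WithSupplementalGroups appends on every call.
	user := applycorev1.LinuxContainerUser().
		WithUID(1000).
		WithGID(1000).
		WithSupplementalGroups(1000, 2000)

	fmt.Println(*user.UID, *user.GID, user.SupplementalGroups)
}
```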
func LoadBalancerStatus() *LoadBalancerStatusApplyConfiguration { return &LoadBalancerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go index 7662e32b31..c55d6803dc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// LocalObjectReferenceApplyConfiguration represents an declarative configuration of the LocalObjectReference type for use +// LocalObjectReferenceApplyConfiguration represents a declarative configuration of the LocalObjectReference type for use // with apply. type LocalObjectReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// LocalObjectReferenceApplyConfiguration constructs an declarative configuration of the LocalObjectReference type for use with +// LocalObjectReferenceApplyConfiguration constructs a declarative configuration of the LocalObjectReference type for use with // apply. func LocalObjectReference() *LocalObjectReferenceApplyConfiguration { return &LocalObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go index 5d289bd12d..db711d9934 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// LocalVolumeSourceApplyConfiguration represents an declarative configuration of the LocalVolumeSource type for use +// LocalVolumeSourceApplyConfiguration represents a declarative configuration of the LocalVolumeSource type for use // with apply. type LocalVolumeSourceApplyConfiguration struct { Path *string `json:"path,omitempty"` FSType *string `json:"fsType,omitempty"` } -// LocalVolumeSourceApplyConfiguration constructs an declarative configuration of the LocalVolumeSource type for use with +// LocalVolumeSourceApplyConfiguration constructs a declarative configuration of the LocalVolumeSource type for use with // apply. func LocalVolumeSource() *LocalVolumeSourceApplyConfiguration { return &LocalVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go index 4ff1d040cf..704c321652 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ModifyVolumeStatusApplyConfiguration represents an declarative configuration of the ModifyVolumeStatus type for use +// ModifyVolumeStatusApplyConfiguration represents a declarative configuration of the ModifyVolumeStatus type for use // with apply. 
type ModifyVolumeStatusApplyConfiguration struct { TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"` Status *v1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"` } -// ModifyVolumeStatusApplyConfiguration constructs an declarative configuration of the ModifyVolumeStatus type for use with +// ModifyVolumeStatusApplyConfiguration constructs a declarative configuration of the ModifyVolumeStatus type for use with // apply. func ModifyVolumeStatus() *ModifyVolumeStatusApplyConfiguration { return &ModifyVolumeStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go index bdc9ef167c..0b77af183d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NamespaceApplyConfiguration represents an declarative configuration of the Namespace type for use +// NamespaceApplyConfiguration represents a declarative configuration of the Namespace type for use // with apply. type NamespaceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type NamespaceApplyConfiguration struct { Status *NamespaceStatusApplyConfiguration `json:"status,omitempty"` } -// Namespace constructs an declarative configuration of the Namespace type for use with +// Namespace constructs a declarative configuration of the Namespace type for use with // apply. func Namespace(name string) *NamespaceApplyConfiguration { b := &NamespaceApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *NamespaceApplyConfiguration) WithStatus(value *NamespaceStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NamespaceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go index 8651978b0f..9784c3e6f4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// NamespaceConditionApplyConfiguration represents an declarative configuration of the NamespaceCondition type for use +// NamespaceConditionApplyConfiguration represents a declarative configuration of the NamespaceCondition type for use // with apply. type NamespaceConditionApplyConfiguration struct { Type *v1.NamespaceConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type NamespaceConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// NamespaceConditionApplyConfiguration constructs an declarative configuration of the NamespaceCondition type for use with +// NamespaceConditionApplyConfiguration constructs a declarative configuration of the NamespaceCondition type for use with // apply. 
func NamespaceCondition() *NamespaceConditionApplyConfiguration { return &NamespaceConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go index 9bc02d1fa2..6d7b7f1f95 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// NamespaceSpecApplyConfiguration represents an declarative configuration of the NamespaceSpec type for use +// NamespaceSpecApplyConfiguration represents a declarative configuration of the NamespaceSpec type for use // with apply. type NamespaceSpecApplyConfiguration struct { Finalizers []v1.FinalizerName `json:"finalizers,omitempty"` } -// NamespaceSpecApplyConfiguration constructs an declarative configuration of the NamespaceSpec type for use with +// NamespaceSpecApplyConfiguration constructs a declarative configuration of the NamespaceSpec type for use with // apply. func NamespaceSpec() *NamespaceSpecApplyConfiguration { return &NamespaceSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go index d950fd3161..314908109f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// NamespaceStatusApplyConfiguration represents an declarative configuration of the NamespaceStatus type for use +// NamespaceStatusApplyConfiguration represents a declarative configuration of the NamespaceStatus type for use // with apply. type NamespaceStatusApplyConfiguration struct { Phase *v1.NamespacePhase `json:"phase,omitempty"` Conditions []NamespaceConditionApplyConfiguration `json:"conditions,omitempty"` } -// NamespaceStatusApplyConfiguration constructs an declarative configuration of the NamespaceStatus type for use with +// NamespaceStatusApplyConfiguration constructs a declarative configuration of the NamespaceStatus type for use with // apply. func NamespaceStatus() *NamespaceStatusApplyConfiguration { return &NamespaceStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go index cb300ee81e..ed49a87a9e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NFSVolumeSourceApplyConfiguration represents an declarative configuration of the NFSVolumeSource type for use +// NFSVolumeSourceApplyConfiguration represents a declarative configuration of the NFSVolumeSource type for use // with apply. type NFSVolumeSourceApplyConfiguration struct { Server *string `json:"server,omitempty"` @@ -26,7 +26,7 @@ type NFSVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// NFSVolumeSourceApplyConfiguration constructs an declarative configuration of the NFSVolumeSource type for use with +// NFSVolumeSourceApplyConfiguration constructs a declarative configuration of the NFSVolumeSource type for use with // apply. 
func NFSVolumeSource() *NFSVolumeSourceApplyConfiguration { return &NFSVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go index 047f4ac1cb..ef13392591 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NodeApplyConfiguration represents an declarative configuration of the Node type for use +// NodeApplyConfiguration represents a declarative configuration of the Node type for use // with apply. type NodeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type NodeApplyConfiguration struct { Status *NodeStatusApplyConfiguration `json:"status,omitempty"` } -// Node constructs an declarative configuration of the Node type for use with +// Node constructs a declarative configuration of the Node type for use with // apply. func Node(name string) *NodeApplyConfiguration { b := &NodeApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *NodeApplyConfiguration) WithStatus(value *NodeStatusApplyConfiguration) b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go index a1d4fbe04e..a9cb036c54 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// NodeAddressApplyConfiguration represents an declarative configuration of the NodeAddress type for use +// NodeAddressApplyConfiguration represents a declarative configuration of the NodeAddress type for use // with apply. type NodeAddressApplyConfiguration struct { Type *v1.NodeAddressType `json:"type,omitempty"` Address *string `json:"address,omitempty"` } -// NodeAddressApplyConfiguration constructs an declarative configuration of the NodeAddress type for use with +// NodeAddressApplyConfiguration constructs a declarative configuration of the NodeAddress type for use with // apply. func NodeAddress() *NodeAddressApplyConfiguration { return &NodeAddressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go index e28ced6e46..5d11d746dc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NodeAffinityApplyConfiguration represents an declarative configuration of the NodeAffinity type for use +// NodeAffinityApplyConfiguration represents a declarative configuration of the NodeAffinity type for use // with apply. 
type NodeAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution *NodeSelectorApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// NodeAffinityApplyConfiguration constructs an declarative configuration of the NodeAffinity type for use with +// NodeAffinityApplyConfiguration constructs a declarative configuration of the NodeAffinity type for use with // apply. func NodeAffinity() *NodeAffinityApplyConfiguration { return &NodeAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go index eb81ca543f..a1b8ed0f38 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// NodeConditionApplyConfiguration represents an declarative configuration of the NodeCondition type for use +// NodeConditionApplyConfiguration represents a declarative configuration of the NodeCondition type for use // with apply. type NodeConditionApplyConfiguration struct { Type *v1.NodeConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type NodeConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// NodeConditionApplyConfiguration constructs an declarative configuration of the NodeCondition type for use with +// NodeConditionApplyConfiguration constructs a declarative configuration of the NodeCondition type for use with // apply. func NodeCondition() *NodeConditionApplyConfiguration { return &NodeConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go index 60567aa431..00a671fc0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeConfigSourceApplyConfiguration represents an declarative configuration of the NodeConfigSource type for use +// NodeConfigSourceApplyConfiguration represents a declarative configuration of the NodeConfigSource type for use // with apply. type NodeConfigSourceApplyConfiguration struct { ConfigMap *ConfigMapNodeConfigSourceApplyConfiguration `json:"configMap,omitempty"` } -// NodeConfigSourceApplyConfiguration constructs an declarative configuration of the NodeConfigSource type for use with +// NodeConfigSourceApplyConfiguration constructs a declarative configuration of the NodeConfigSource type for use with // apply. func NodeConfigSource() *NodeConfigSourceApplyConfiguration { return &NodeConfigSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go index 71447fe9c0..d5ccc45c6a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// NodeConfigStatusApplyConfiguration represents an declarative configuration of the NodeConfigStatus type for use +// NodeConfigStatusApplyConfiguration represents a declarative configuration of the NodeConfigStatus type for use // with apply. type NodeConfigStatusApplyConfiguration struct { Assigned *NodeConfigSourceApplyConfiguration `json:"assigned,omitempty"` @@ -27,7 +27,7 @@ type NodeConfigStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// NodeConfigStatusApplyConfiguration constructs an declarative configuration of the NodeConfigStatus type for use with +// NodeConfigStatusApplyConfiguration constructs a declarative configuration of the NodeConfigStatus type for use with // apply. func NodeConfigStatus() *NodeConfigStatusApplyConfiguration { return &NodeConfigStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go index 4cabc7f526..11228b3691 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeDaemonEndpointsApplyConfiguration represents an declarative configuration of the NodeDaemonEndpoints type for use +// NodeDaemonEndpointsApplyConfiguration represents a declarative configuration of the NodeDaemonEndpoints type for use // with apply. type NodeDaemonEndpointsApplyConfiguration struct { KubeletEndpoint *DaemonEndpointApplyConfiguration `json:"kubeletEndpoint,omitempty"` } -// NodeDaemonEndpointsApplyConfiguration constructs an declarative configuration of the NodeDaemonEndpoints type for use with +// NodeDaemonEndpointsApplyConfiguration constructs a declarative configuration of the NodeDaemonEndpoints type for use with // apply. func NodeDaemonEndpoints() *NodeDaemonEndpointsApplyConfiguration { return &NodeDaemonEndpointsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go new file mode 100644 index 0000000000..678b0e36d6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeFeaturesApplyConfiguration represents a declarative configuration of the NodeFeatures type for use +// with apply. +type NodeFeaturesApplyConfiguration struct { + SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty"` +} + +// NodeFeaturesApplyConfiguration constructs a declarative configuration of the NodeFeatures type for use with +// apply. 
+func NodeFeatures() *NodeFeaturesApplyConfiguration { + return &NodeFeaturesApplyConfiguration{} +} + +// WithSupplementalGroupsPolicy sets the SupplementalGroupsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SupplementalGroupsPolicy field is set to the value of the last call. +func (b *NodeFeaturesApplyConfiguration) WithSupplementalGroupsPolicy(value bool) *NodeFeaturesApplyConfiguration { + b.SupplementalGroupsPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go index 9ada0a18ef..c7c664974e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NodeRuntimeHandlerApplyConfiguration represents an declarative configuration of the NodeRuntimeHandler type for use +// NodeRuntimeHandlerApplyConfiguration represents a declarative configuration of the NodeRuntimeHandler type for use // with apply. type NodeRuntimeHandlerApplyConfiguration struct { Name *string `json:"name,omitempty"` Features *NodeRuntimeHandlerFeaturesApplyConfiguration `json:"features,omitempty"` } -// NodeRuntimeHandlerApplyConfiguration constructs an declarative configuration of the NodeRuntimeHandler type for use with +// NodeRuntimeHandlerApplyConfiguration constructs a declarative configuration of the NodeRuntimeHandler type for use with // apply. func NodeRuntimeHandler() *NodeRuntimeHandlerApplyConfiguration { return &NodeRuntimeHandlerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go index a3e3a52e88..a295b60969 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go @@ -18,13 +18,14 @@ limitations under the License. package v1 -// NodeRuntimeHandlerFeaturesApplyConfiguration represents an declarative configuration of the NodeRuntimeHandlerFeatures type for use +// NodeRuntimeHandlerFeaturesApplyConfiguration represents a declarative configuration of the NodeRuntimeHandlerFeatures type for use // with apply. type NodeRuntimeHandlerFeaturesApplyConfiguration struct { RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty"` + UserNamespaces *bool `json:"userNamespaces,omitempty"` } -// NodeRuntimeHandlerFeaturesApplyConfiguration constructs an declarative configuration of the NodeRuntimeHandlerFeatures type for use with +// NodeRuntimeHandlerFeaturesApplyConfiguration constructs a declarative configuration of the NodeRuntimeHandlerFeatures type for use with // apply. func NodeRuntimeHandlerFeatures() *NodeRuntimeHandlerFeaturesApplyConfiguration { return &NodeRuntimeHandlerFeaturesApplyConfiguration{} @@ -37,3 +38,11 @@ func (b *NodeRuntimeHandlerFeaturesApplyConfiguration) WithRecursiveReadOnlyMoun b.RecursiveReadOnlyMounts = &value return b } + +// WithUserNamespaces sets the UserNamespaces field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UserNamespaces field is set to the value of the last call. +func (b *NodeRuntimeHandlerFeaturesApplyConfiguration) WithUserNamespaces(value bool) *NodeRuntimeHandlerFeaturesApplyConfiguration { + b.UserNamespaces = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go index 5489097f5a..6eab109795 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeSelectorApplyConfiguration represents an declarative configuration of the NodeSelector type for use +// NodeSelectorApplyConfiguration represents a declarative configuration of the NodeSelector type for use // with apply. type NodeSelectorApplyConfiguration struct { NodeSelectorTerms []NodeSelectorTermApplyConfiguration `json:"nodeSelectorTerms,omitempty"` } -// NodeSelectorApplyConfiguration constructs an declarative configuration of the NodeSelector type for use with +// NodeSelectorApplyConfiguration constructs a declarative configuration of the NodeSelector type for use with // apply. func NodeSelector() *NodeSelectorApplyConfiguration { return &NodeSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go index a6e43e607e..7c383e06c2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// NodeSelectorRequirementApplyConfiguration represents an declarative configuration of the NodeSelectorRequirement type for use +// NodeSelectorRequirementApplyConfiguration represents a declarative configuration of the NodeSelectorRequirement type for use // with apply. type NodeSelectorRequirementApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -30,7 +30,7 @@ type NodeSelectorRequirementApplyConfiguration struct { Values []string `json:"values,omitempty"` } -// NodeSelectorRequirementApplyConfiguration constructs an declarative configuration of the NodeSelectorRequirement type for use with +// NodeSelectorRequirementApplyConfiguration constructs a declarative configuration of the NodeSelectorRequirement type for use with // apply. func NodeSelectorRequirement() *NodeSelectorRequirementApplyConfiguration { return &NodeSelectorRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go index 13b3ddbc1b..9d0d780f3e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NodeSelectorTermApplyConfiguration represents an declarative configuration of the NodeSelectorTerm type for use +// NodeSelectorTermApplyConfiguration represents a declarative configuration of the NodeSelectorTerm type for use // with apply. 
type NodeSelectorTermApplyConfiguration struct { MatchExpressions []NodeSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` MatchFields []NodeSelectorRequirementApplyConfiguration `json:"matchFields,omitempty"` } -// NodeSelectorTermApplyConfiguration constructs an declarative configuration of the NodeSelectorTerm type for use with +// NodeSelectorTermApplyConfiguration constructs a declarative configuration of the NodeSelectorTerm type for use with // apply. func NodeSelectorTerm() *NodeSelectorTermApplyConfiguration { return &NodeSelectorTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go index 63b61078d0..8ac3497127 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NodeSpecApplyConfiguration represents an declarative configuration of the NodeSpec type for use +// NodeSpecApplyConfiguration represents a declarative configuration of the NodeSpec type for use // with apply. type NodeSpecApplyConfiguration struct { PodCIDR *string `json:"podCIDR,omitempty"` @@ -30,7 +30,7 @@ type NodeSpecApplyConfiguration struct { DoNotUseExternalID *string `json:"externalID,omitempty"` } -// NodeSpecApplyConfiguration constructs an declarative configuration of the NodeSpec type for use with +// NodeSpecApplyConfiguration constructs a declarative configuration of the NodeSpec type for use with // apply. func NodeSpec() *NodeSpecApplyConfiguration { return &NodeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go index a4a30a2685..8411c57ac0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// NodeStatusApplyConfiguration represents an declarative configuration of the NodeStatus type for use +// NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use // with apply. type NodeStatusApplyConfiguration struct { Capacity *v1.ResourceList `json:"capacity,omitempty"` @@ -37,9 +37,10 @@ type NodeStatusApplyConfiguration struct { VolumesAttached []AttachedVolumeApplyConfiguration `json:"volumesAttached,omitempty"` Config *NodeConfigStatusApplyConfiguration `json:"config,omitempty"` RuntimeHandlers []NodeRuntimeHandlerApplyConfiguration `json:"runtimeHandlers,omitempty"` + Features *NodeFeaturesApplyConfiguration `json:"features,omitempty"` } -// NodeStatusApplyConfiguration constructs an declarative configuration of the NodeStatus type for use with +// NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with // apply. func NodeStatus() *NodeStatusApplyConfiguration { return &NodeStatusApplyConfiguration{} @@ -167,3 +168,11 @@ func (b *NodeStatusApplyConfiguration) WithRuntimeHandlers(values ...*NodeRuntim } return b } + +// WithFeatures sets the Features field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Features field is set to the value of the last call. 
+func (b *NodeStatusApplyConfiguration) WithFeatures(value *NodeFeaturesApplyConfiguration) *NodeStatusApplyConfiguration { + b.Features = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go index 2634ea9842..11ac50713c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NodeSystemInfoApplyConfiguration represents an declarative configuration of the NodeSystemInfo type for use +// NodeSystemInfoApplyConfiguration represents a declarative configuration of the NodeSystemInfo type for use // with apply. type NodeSystemInfoApplyConfiguration struct { MachineID *string `json:"machineID,omitempty"` @@ -33,7 +33,7 @@ type NodeSystemInfoApplyConfiguration struct { Architecture *string `json:"architecture,omitempty"` } -// NodeSystemInfoApplyConfiguration constructs an declarative configuration of the NodeSystemInfo type for use with +// NodeSystemInfoApplyConfiguration constructs a declarative configuration of the NodeSystemInfo type for use with // apply. func NodeSystemInfo() *NodeSystemInfoApplyConfiguration { return &NodeSystemInfoApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go index 0c2402b3c7..c129c998b1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ObjectFieldSelectorApplyConfiguration represents an declarative configuration of the ObjectFieldSelector type for use +// ObjectFieldSelectorApplyConfiguration represents a declarative configuration of the ObjectFieldSelector type for use // with apply. type ObjectFieldSelectorApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` FieldPath *string `json:"fieldPath,omitempty"` } -// ObjectFieldSelectorApplyConfiguration constructs an declarative configuration of the ObjectFieldSelector type for use with +// ObjectFieldSelectorApplyConfiguration constructs a declarative configuration of the ObjectFieldSelector type for use with // apply. func ObjectFieldSelector() *ObjectFieldSelectorApplyConfiguration { return &ObjectFieldSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go index 667fa84a81..4cd3f226ef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// ObjectReferenceApplyConfiguration represents an declarative configuration of the ObjectReference type for use +// ObjectReferenceApplyConfiguration represents a declarative configuration of the ObjectReference type for use // with apply. 
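Editor's note: taken together, the NodeFeatures builder, the new NodeRuntimeHandlerFeatures.WithUserNamespaces setter, and NodeStatus.WithFeatures above extend the node status apply configuration. A sketch under the assumption that the pre-existing generated WithName and WithFeatures setters on NodeRuntimeHandler are available as usual:

```go
package main

import (
	"fmt"

	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	status := applycorev1.NodeStatus().
		// status.features is new in this version of the generated API.
		WithFeatures(applycorev1.NodeFeatures().
			WithSupplementalGroupsPolicy(true)).
		// userNamespaces joins recursiveReadOnlyMounts on runtime handler features.
		WithRuntimeHandlers(applycorev1.NodeRuntimeHandler().
			WithName("runc").
			WithFeatures(applycorev1.NodeRuntimeHandlerFeatures().
				WithRecursiveReadOnlyMounts(true).
				WithUserNamespaces(true)))

	fmt.Println(*status.Features.SupplementalGroupsPolicy,
		*status.RuntimeHandlers[0].Features.UserNamespaces)
}
```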
type ObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -34,7 +34,7 @@ type ObjectReferenceApplyConfiguration struct { FieldPath *string `json:"fieldPath,omitempty"` } -// ObjectReferenceApplyConfiguration constructs an declarative configuration of the ObjectReference type for use with +// ObjectReferenceApplyConfiguration constructs a declarative configuration of the ObjectReference type for use with // apply. func ObjectReference() *ObjectReferenceApplyConfiguration { return &ObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go index 2599c197e8..020f87411e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeApplyConfiguration represents an declarative configuration of the PersistentVolume type for use +// PersistentVolumeApplyConfiguration represents a declarative configuration of the PersistentVolume type for use // with apply. type PersistentVolumeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PersistentVolumeApplyConfiguration struct { Status *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"` } -// PersistentVolume constructs an declarative configuration of the PersistentVolume type for use with +// PersistentVolume constructs a declarative configuration of the PersistentVolume type for use with // apply. func PersistentVolume(name string) *PersistentVolumeApplyConfiguration { b := &PersistentVolumeApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *PersistentVolumeApplyConfiguration) WithStatus(value *PersistentVolumeS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PersistentVolumeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go index a0a0017018..81cf791443 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimApplyConfiguration represents an declarative configuration of the PersistentVolumeClaim type for use +// PersistentVolumeClaimApplyConfiguration represents a declarative configuration of the PersistentVolumeClaim type for use // with apply. type PersistentVolumeClaimApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PersistentVolumeClaimApplyConfiguration struct { Status *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"` } -// PersistentVolumeClaim constructs an declarative configuration of the PersistentVolumeClaim type for use with +// PersistentVolumeClaim constructs a declarative configuration of the PersistentVolumeClaim type for use with // apply. 
func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyConfiguration { b := &PersistentVolumeClaimApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithStatus(value *PersistentVo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PersistentVolumeClaimApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go index 65449e92eb..80038c0677 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PersistentVolumeClaimConditionApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimCondition type for use +// PersistentVolumeClaimConditionApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimCondition type for use // with apply. type PersistentVolumeClaimConditionApplyConfiguration struct { Type *v1.PersistentVolumeClaimConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type PersistentVolumeClaimConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PersistentVolumeClaimConditionApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimCondition type for use with +// PersistentVolumeClaimConditionApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimCondition type for use with // apply. func PersistentVolumeClaimCondition() *PersistentVolumeClaimConditionApplyConfiguration { return &PersistentVolumeClaimConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go index 4db12fadb3..5ce671cd99 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimSpec type for use +// PersistentVolumeClaimSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimSpec type for use // with apply. type PersistentVolumeClaimSpecApplyConfiguration struct { AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` @@ -37,7 +37,7 @@ type PersistentVolumeClaimSpecApplyConfiguration struct { VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } -// PersistentVolumeClaimSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimSpec type for use with +// PersistentVolumeClaimSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimSpec type for use with // apply. 
func PersistentVolumeClaimSpec() *PersistentVolumeClaimSpecApplyConfiguration { return &PersistentVolumeClaimSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go index 1f6d5ae323..3eebf95ad5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use +// PersistentVolumeClaimStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimStatus type for use // with apply. type PersistentVolumeClaimStatusApplyConfiguration struct { Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` @@ -35,7 +35,7 @@ type PersistentVolumeClaimStatusApplyConfiguration struct { ModifyVolumeStatus *ModifyVolumeStatusApplyConfiguration `json:"modifyVolumeStatus,omitempty"` } -// PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with +// PersistentVolumeClaimStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimStatus type for use with // apply. func PersistentVolumeClaimStatus() *PersistentVolumeClaimStatusApplyConfiguration { return &PersistentVolumeClaimStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go index 894d04f0b4..ed49702913 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go @@ -24,14 +24,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimTemplateApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimTemplate type for use +// PersistentVolumeClaimTemplateApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimTemplate type for use // with apply. type PersistentVolumeClaimTemplateApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"` } -// PersistentVolumeClaimTemplateApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimTemplate type for use with +// PersistentVolumeClaimTemplateApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimTemplate type for use with // apply. func PersistentVolumeClaimTemplate() *PersistentVolumeClaimTemplateApplyConfiguration { return &PersistentVolumeClaimTemplateApplyConfiguration{} @@ -186,3 +186,9 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithSpec(value *Persis b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PersistentVolumeClaimTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go index a498fa6a5e..ccccdfb493 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PersistentVolumeClaimVolumeSourceApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimVolumeSource type for use +// PersistentVolumeClaimVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimVolumeSource type for use // with apply. type PersistentVolumeClaimVolumeSourceApplyConfiguration struct { ClaimName *string `json:"claimName,omitempty"` ReadOnly *bool `json:"readOnly,omitempty"` } -// PersistentVolumeClaimVolumeSourceApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimVolumeSource type for use with +// PersistentVolumeClaimVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimVolumeSource type for use with // apply. func PersistentVolumeClaimVolumeSource() *PersistentVolumeClaimVolumeSourceApplyConfiguration { return &PersistentVolumeClaimVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go index 0576e7dd31..aba0124622 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PersistentVolumeSourceApplyConfiguration represents an declarative configuration of the PersistentVolumeSource type for use +// PersistentVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeSource type for use // with apply. type PersistentVolumeSourceApplyConfiguration struct { GCEPersistentDisk *GCEPersistentDiskVolumeSourceApplyConfiguration `json:"gcePersistentDisk,omitempty"` @@ -45,7 +45,7 @@ type PersistentVolumeSourceApplyConfiguration struct { CSI *CSIPersistentVolumeSourceApplyConfiguration `json:"csi,omitempty"` } -// PersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the PersistentVolumeSource type for use with +// PersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeSource type for use with // apply. 
func PersistentVolumeSource() *PersistentVolumeSourceApplyConfiguration { return &PersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go index 8a30dab649..074fa55d1f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// PersistentVolumeSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeSpec type for use +// PersistentVolumeSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeSpec type for use // with apply. type PersistentVolumeSpecApplyConfiguration struct { Capacity *v1.ResourceList `json:"capacity,omitempty"` @@ -37,7 +37,7 @@ type PersistentVolumeSpecApplyConfiguration struct { VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } -// PersistentVolumeSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeSpec type for use with +// PersistentVolumeSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeSpec type for use with // apply. func PersistentVolumeSpec() *PersistentVolumeSpecApplyConfiguration { return &PersistentVolumeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go index a473c0e927..95ba90f48b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PersistentVolumeStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeStatus type for use +// PersistentVolumeStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeStatus type for use // with apply. type PersistentVolumeStatusApplyConfiguration struct { Phase *v1.PersistentVolumePhase `json:"phase,omitempty"` @@ -32,7 +32,7 @@ type PersistentVolumeStatusApplyConfiguration struct { LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"` } -// PersistentVolumeStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeStatus type for use with +// PersistentVolumeStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeStatus type for use with // apply. func PersistentVolumeStatus() *PersistentVolumeStatusApplyConfiguration { return &PersistentVolumeStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go index 43587d6768..d8dc103e2a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// PhotonPersistentDiskVolumeSourceApplyConfiguration represents an declarative configuration of the PhotonPersistentDiskVolumeSource type for use +// PhotonPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the PhotonPersistentDiskVolumeSource type for use // with apply. type PhotonPersistentDiskVolumeSourceApplyConfiguration struct { PdID *string `json:"pdID,omitempty"` FSType *string `json:"fsType,omitempty"` } -// PhotonPersistentDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the PhotonPersistentDiskVolumeSource type for use with +// PhotonPersistentDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the PhotonPersistentDiskVolumeSource type for use with // apply. func PhotonPersistentDiskVolumeSource() *PhotonPersistentDiskVolumeSourceApplyConfiguration { return &PhotonPersistentDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go index 7210bd9836..507d57d6f3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodApplyConfiguration represents an declarative configuration of the Pod type for use +// PodApplyConfiguration represents a declarative configuration of the Pod type for use // with apply. type PodApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PodApplyConfiguration struct { Status *PodStatusApplyConfiguration `json:"status,omitempty"` } -// Pod constructs an declarative configuration of the Pod type for use with +// Pod constructs a declarative configuration of the Pod type for use with // apply. func Pod(name, namespace string) *PodApplyConfiguration { b := &PodApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *PodApplyConfiguration) WithStatus(value *PodStatusApplyConfiguration) * b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go index 7049c62121..23fed95464 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodAffinityApplyConfiguration represents an declarative configuration of the PodAffinity type for use +// PodAffinityApplyConfiguration represents a declarative configuration of the PodAffinity type for use // with apply. type PodAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// PodAffinityApplyConfiguration constructs an declarative configuration of the PodAffinity type for use with +// PodAffinityApplyConfiguration constructs a declarative configuration of the PodAffinity type for use with // apply. 
func PodAffinity() *PodAffinityApplyConfiguration { return &PodAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go index ac1eab3d8c..3afce026d4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodAffinityTermApplyConfiguration represents an declarative configuration of the PodAffinityTerm type for use +// PodAffinityTermApplyConfiguration represents a declarative configuration of the PodAffinityTerm type for use // with apply. type PodAffinityTermApplyConfiguration struct { LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` @@ -33,7 +33,7 @@ type PodAffinityTermApplyConfiguration struct { MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"` } -// PodAffinityTermApplyConfiguration constructs an declarative configuration of the PodAffinityTerm type for use with +// PodAffinityTermApplyConfiguration constructs a declarative configuration of the PodAffinityTerm type for use with // apply. func PodAffinityTerm() *PodAffinityTermApplyConfiguration { return &PodAffinityTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go index 42681c54c4..ae9848963d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodAntiAffinityApplyConfiguration represents an declarative configuration of the PodAntiAffinity type for use +// PodAntiAffinityApplyConfiguration represents a declarative configuration of the PodAntiAffinity type for use // with apply. type PodAntiAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// PodAntiAffinityApplyConfiguration constructs an declarative configuration of the PodAntiAffinity type for use with +// PodAntiAffinityApplyConfiguration constructs a declarative configuration of the PodAntiAffinity type for use with // apply. func PodAntiAffinity() *PodAntiAffinityApplyConfiguration { return &PodAntiAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go index 610209f3c4..98968d26d0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PodConditionApplyConfiguration represents an declarative configuration of the PodCondition type for use +// PodConditionApplyConfiguration represents a declarative configuration of the PodCondition type for use // with apply. 
type PodConditionApplyConfiguration struct { Type *v1.PodConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type PodConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PodConditionApplyConfiguration constructs an declarative configuration of the PodCondition type for use with +// PodConditionApplyConfiguration constructs a declarative configuration of the PodCondition type for use with // apply. func PodCondition() *PodConditionApplyConfiguration { return &PodConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go index 0fe6a08349..2e0ce9a91e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PodDNSConfigApplyConfiguration represents an declarative configuration of the PodDNSConfig type for use +// PodDNSConfigApplyConfiguration represents a declarative configuration of the PodDNSConfig type for use // with apply. type PodDNSConfigApplyConfiguration struct { Nameservers []string `json:"nameservers,omitempty"` @@ -26,7 +26,7 @@ type PodDNSConfigApplyConfiguration struct { Options []PodDNSConfigOptionApplyConfiguration `json:"options,omitempty"` } -// PodDNSConfigApplyConfiguration constructs an declarative configuration of the PodDNSConfig type for use with +// PodDNSConfigApplyConfiguration constructs a declarative configuration of the PodDNSConfig type for use with // apply. func PodDNSConfig() *PodDNSConfigApplyConfiguration { return &PodDNSConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go index 327bf803b3..458b333bf2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodDNSConfigOptionApplyConfiguration represents an declarative configuration of the PodDNSConfigOption type for use +// PodDNSConfigOptionApplyConfiguration represents a declarative configuration of the PodDNSConfigOption type for use // with apply. type PodDNSConfigOptionApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// PodDNSConfigOptionApplyConfiguration constructs an declarative configuration of the PodDNSConfigOption type for use with +// PodDNSConfigOptionApplyConfiguration constructs a declarative configuration of the PodDNSConfigOption type for use with // apply. func PodDNSConfigOption() *PodDNSConfigOptionApplyConfiguration { return &PodDNSConfigOptionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go index 3c6e6b87ac..73f089856f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PodIPApplyConfiguration represents an declarative configuration of the PodIP type for use +// PodIPApplyConfiguration represents a declarative configuration of the PodIP type for use // with apply. 
type PodIPApplyConfiguration struct { IP *string `json:"ip,omitempty"` } -// PodIPApplyConfiguration constructs an declarative configuration of the PodIP type for use with +// PodIPApplyConfiguration constructs a declarative configuration of the PodIP type for use with // apply. func PodIP() *PodIPApplyConfiguration { return &PodIPApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go index a5315d636b..7f156f817d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// PodOSApplyConfiguration represents an declarative configuration of the PodOS type for use +// PodOSApplyConfiguration represents a declarative configuration of the PodOS type for use // with apply. type PodOSApplyConfiguration struct { Name *v1.OSName `json:"name,omitempty"` } -// PodOSApplyConfiguration constructs an declarative configuration of the PodOS type for use with +// PodOSApplyConfiguration constructs a declarative configuration of the PodOS type for use with // apply. func PodOS() *PodOSApplyConfiguration { return &PodOSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go index 9d3ad458ac..09746df1bd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// PodReadinessGateApplyConfiguration represents an declarative configuration of the PodReadinessGate type for use +// PodReadinessGateApplyConfiguration represents a declarative configuration of the PodReadinessGate type for use // with apply. type PodReadinessGateApplyConfiguration struct { ConditionType *v1.PodConditionType `json:"conditionType,omitempty"` } -// PodReadinessGateApplyConfiguration constructs an declarative configuration of the PodReadinessGate type for use with +// PodReadinessGateApplyConfiguration constructs a declarative configuration of the PodReadinessGate type for use with // apply. func PodReadinessGate() *PodReadinessGateApplyConfiguration { return &PodReadinessGateApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go index 69b250d474..b0bd67fa11 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go @@ -18,14 +18,15 @@ limitations under the License. package v1 -// PodResourceClaimApplyConfiguration represents an declarative configuration of the PodResourceClaim type for use +// PodResourceClaimApplyConfiguration represents a declarative configuration of the PodResourceClaim type for use // with apply. 
type PodResourceClaimApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Source *ClaimSourceApplyConfiguration `json:"source,omitempty"` + Name *string `json:"name,omitempty"` + ResourceClaimName *string `json:"resourceClaimName,omitempty"` + ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty"` } -// PodResourceClaimApplyConfiguration constructs an declarative configuration of the PodResourceClaim type for use with +// PodResourceClaimApplyConfiguration constructs a declarative configuration of the PodResourceClaim type for use with // apply. func PodResourceClaim() *PodResourceClaimApplyConfiguration { return &PodResourceClaimApplyConfiguration{} @@ -39,10 +40,18 @@ func (b *PodResourceClaimApplyConfiguration) WithName(value string) *PodResource return b } -// WithSource sets the Source field in the declarative configuration to the given value +// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Source field is set to the value of the last call. -func (b *PodResourceClaimApplyConfiguration) WithSource(value *ClaimSourceApplyConfiguration) *PodResourceClaimApplyConfiguration { - b.Source = value +// If called multiple times, the ResourceClaimName field is set to the value of the last call. +func (b *PodResourceClaimApplyConfiguration) WithResourceClaimName(value string) *PodResourceClaimApplyConfiguration { + b.ResourceClaimName = &value + return b +} + +// WithResourceClaimTemplateName sets the ResourceClaimTemplateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceClaimTemplateName field is set to the value of the last call. +func (b *PodResourceClaimApplyConfiguration) WithResourceClaimTemplateName(value string) *PodResourceClaimApplyConfiguration { + b.ResourceClaimTemplateName = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go index ae79ca01b7..f60ad4b052 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodResourceClaimStatusApplyConfiguration represents an declarative configuration of the PodResourceClaimStatus type for use +// PodResourceClaimStatusApplyConfiguration represents a declarative configuration of the PodResourceClaimStatus type for use // with apply. type PodResourceClaimStatusApplyConfiguration struct { Name *string `json:"name,omitempty"` ResourceClaimName *string `json:"resourceClaimName,omitempty"` } -// PodResourceClaimStatusApplyConfiguration constructs an declarative configuration of the PodResourceClaimStatus type for use with +// PodResourceClaimStatusApplyConfiguration constructs a declarative configuration of the PodResourceClaimStatus type for use with // apply. 
func PodResourceClaimStatus() *PodResourceClaimStatusApplyConfiguration { return &PodResourceClaimStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go index f7649c2e92..3d91092776 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PodSchedulingGateApplyConfiguration represents an declarative configuration of the PodSchedulingGate type for use +// PodSchedulingGateApplyConfiguration represents a declarative configuration of the PodSchedulingGate type for use // with apply. type PodSchedulingGateApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PodSchedulingGateApplyConfiguration constructs an declarative configuration of the PodSchedulingGate type for use with +// PodSchedulingGateApplyConfiguration constructs a declarative configuration of the PodSchedulingGate type for use with // apply. func PodSchedulingGate() *PodSchedulingGateApplyConfiguration { return &PodSchedulingGateApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go index 6b340294eb..55085e6307 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go @@ -22,23 +22,24 @@ import ( corev1 "k8s.io/api/core/v1" ) -// PodSecurityContextApplyConfiguration represents an declarative configuration of the PodSecurityContext type for use +// PodSecurityContextApplyConfiguration represents a declarative configuration of the PodSecurityContext type for use // with apply. 
type PodSecurityContextApplyConfiguration struct { - SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` - WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"` - RunAsUser *int64 `json:"runAsUser,omitempty"` - RunAsGroup *int64 `json:"runAsGroup,omitempty"` - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` - SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` - FSGroup *int64 `json:"fsGroup,omitempty"` - Sysctls []SysctlApplyConfiguration `json:"sysctls,omitempty"` - FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"` - SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"` - AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"` + SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` + WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"` + RunAsUser *int64 `json:"runAsUser,omitempty"` + RunAsGroup *int64 `json:"runAsGroup,omitempty"` + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` + SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` + SupplementalGroupsPolicy *corev1.SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty"` + FSGroup *int64 `json:"fsGroup,omitempty"` + Sysctls []SysctlApplyConfiguration `json:"sysctls,omitempty"` + FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"` + SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"` + AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"` } -// PodSecurityContextApplyConfiguration constructs an declarative configuration of the PodSecurityContext type for use with +// PodSecurityContextApplyConfiguration constructs a declarative configuration of the PodSecurityContext type for use with // apply. func PodSecurityContext() *PodSecurityContextApplyConfiguration { return &PodSecurityContextApplyConfiguration{} @@ -94,6 +95,14 @@ func (b *PodSecurityContextApplyConfiguration) WithSupplementalGroups(values ... return b } +// WithSupplementalGroupsPolicy sets the SupplementalGroupsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SupplementalGroupsPolicy field is set to the value of the last call. +func (b *PodSecurityContextApplyConfiguration) WithSupplementalGroupsPolicy(value corev1.SupplementalGroupsPolicy) *PodSecurityContextApplyConfiguration { + b.SupplementalGroupsPolicy = &value + return b +} + // WithFSGroup sets the FSGroup field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FSGroup field is set to the value of the last call. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go index a9acd36fc7..8134e044f1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// PodSpecApplyConfiguration represents an declarative configuration of the PodSpec type for use +// PodSpecApplyConfiguration represents a declarative configuration of the PodSpec type for use // with apply. type PodSpecApplyConfiguration struct { Volumes []VolumeApplyConfiguration `json:"volumes,omitempty"` @@ -66,7 +66,7 @@ type PodSpecApplyConfiguration struct { ResourceClaims []PodResourceClaimApplyConfiguration `json:"resourceClaims,omitempty"` } -// PodSpecApplyConfiguration constructs an declarative configuration of the PodSpec type for use with +// PodSpecApplyConfiguration constructs a declarative configuration of the PodSpec type for use with // apply. func PodSpec() *PodSpecApplyConfiguration { return &PodSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go index 1a58ab6be2..0b68996cd1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PodStatusApplyConfiguration represents an declarative configuration of the PodStatus type for use +// PodStatusApplyConfiguration represents a declarative configuration of the PodStatus type for use // with apply. type PodStatusApplyConfiguration struct { Phase *v1.PodPhase `json:"phase,omitempty"` @@ -44,7 +44,7 @@ type PodStatusApplyConfiguration struct { ResourceClaimStatuses []PodResourceClaimStatusApplyConfiguration `json:"resourceClaimStatuses,omitempty"` } -// PodStatusApplyConfiguration constructs an declarative configuration of the PodStatus type for use with +// PodStatusApplyConfiguration constructs a declarative configuration of the PodStatus type for use with // apply. func PodStatus() *PodStatusApplyConfiguration { return &PodStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go index 7fe51d9e1b..b4c8a658a4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodTemplateApplyConfiguration represents an declarative configuration of the PodTemplate type for use +// PodTemplateApplyConfiguration represents a declarative configuration of the PodTemplate type for use // with apply. type PodTemplateApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type PodTemplateApplyConfiguration struct { Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// PodTemplate constructs an declarative configuration of the PodTemplate type for use with +// PodTemplate constructs a declarative configuration of the PodTemplate type for use with // apply. 
func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration { b := &PodTemplateApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *PodTemplateApplyConfiguration) WithTemplate(value *PodTemplateSpecApply b.Template = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go index 82878a9ace..6146c01c7c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go @@ -24,14 +24,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodTemplateSpecApplyConfiguration represents an declarative configuration of the PodTemplateSpec type for use +// PodTemplateSpecApplyConfiguration represents a declarative configuration of the PodTemplateSpec type for use // with apply. type PodTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *PodSpecApplyConfiguration `json:"spec,omitempty"` } -// PodTemplateSpecApplyConfiguration constructs an declarative configuration of the PodTemplateSpec type for use with +// PodTemplateSpecApplyConfiguration constructs a declarative configuration of the PodTemplateSpec type for use with // apply. func PodTemplateSpec() *PodTemplateSpecApplyConfiguration { return &PodTemplateSpecApplyConfiguration{} @@ -186,3 +186,9 @@ func (b *PodTemplateSpecApplyConfiguration) WithSpec(value *PodSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go index 8c70c8f6cf..5e738cabdb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// PortStatusApplyConfiguration represents an declarative configuration of the PortStatus type for use +// PortStatusApplyConfiguration represents a declarative configuration of the PortStatus type for use // with apply. type PortStatusApplyConfiguration struct { Port *int32 `json:"port,omitempty"` @@ -30,7 +30,7 @@ type PortStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// PortStatusApplyConfiguration constructs an declarative configuration of the PortStatus type for use with +// PortStatusApplyConfiguration constructs a declarative configuration of the PortStatus type for use with // apply. func PortStatus() *PortStatusApplyConfiguration { return &PortStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go index 19cbb82edb..29715e0219 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// PortworxVolumeSourceApplyConfiguration represents an declarative configuration of the PortworxVolumeSource type for use +// PortworxVolumeSourceApplyConfiguration represents a declarative configuration of the PortworxVolumeSource type for use // with apply. type PortworxVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -26,7 +26,7 @@ type PortworxVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// PortworxVolumeSourceApplyConfiguration constructs an declarative configuration of the PortworxVolumeSource type for use with +// PortworxVolumeSourceApplyConfiguration constructs a declarative configuration of the PortworxVolumeSource type for use with // apply. func PortworxVolumeSource() *PortworxVolumeSourceApplyConfiguration { return &PortworxVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go index a373e4afe0..b88a3646fc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PreferredSchedulingTermApplyConfiguration represents an declarative configuration of the PreferredSchedulingTerm type for use +// PreferredSchedulingTermApplyConfiguration represents a declarative configuration of the PreferredSchedulingTerm type for use // with apply. type PreferredSchedulingTermApplyConfiguration struct { Weight *int32 `json:"weight,omitempty"` Preference *NodeSelectorTermApplyConfiguration `json:"preference,omitempty"` } -// PreferredSchedulingTermApplyConfiguration constructs an declarative configuration of the PreferredSchedulingTerm type for use with +// PreferredSchedulingTermApplyConfiguration constructs a declarative configuration of the PreferredSchedulingTerm type for use with // apply. func PreferredSchedulingTerm() *PreferredSchedulingTermApplyConfiguration { return &PreferredSchedulingTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go index 10730557a0..3be1c9650b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ProbeApplyConfiguration represents an declarative configuration of the Probe type for use +// ProbeApplyConfiguration represents a declarative configuration of the Probe type for use // with apply. type ProbeApplyConfiguration struct { ProbeHandlerApplyConfiguration `json:",inline"` @@ -30,7 +30,7 @@ type ProbeApplyConfiguration struct { TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` } -// ProbeApplyConfiguration constructs an declarative configuration of the Probe type for use with +// ProbeApplyConfiguration constructs a declarative configuration of the Probe type for use with // apply. 
func Probe() *ProbeApplyConfiguration { return &ProbeApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go index 54f3344ac7..1f88745eab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ProbeHandlerApplyConfiguration represents an declarative configuration of the ProbeHandler type for use +// ProbeHandlerApplyConfiguration represents a declarative configuration of the ProbeHandler type for use // with apply. type ProbeHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` @@ -27,7 +27,7 @@ type ProbeHandlerApplyConfiguration struct { GRPC *GRPCActionApplyConfiguration `json:"grpc,omitempty"` } -// ProbeHandlerApplyConfiguration constructs an declarative configuration of the ProbeHandler type for use with +// ProbeHandlerApplyConfiguration constructs a declarative configuration of the ProbeHandler type for use with // apply. func ProbeHandler() *ProbeHandlerApplyConfiguration { return &ProbeHandlerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go index 0a9d1d88e6..c922ec8cc2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ProjectedVolumeSourceApplyConfiguration represents an declarative configuration of the ProjectedVolumeSource type for use +// ProjectedVolumeSourceApplyConfiguration represents a declarative configuration of the ProjectedVolumeSource type for use // with apply. type ProjectedVolumeSourceApplyConfiguration struct { Sources []VolumeProjectionApplyConfiguration `json:"sources,omitempty"` DefaultMode *int32 `json:"defaultMode,omitempty"` } -// ProjectedVolumeSourceApplyConfiguration constructs an declarative configuration of the ProjectedVolumeSource type for use with +// ProjectedVolumeSourceApplyConfiguration constructs a declarative configuration of the ProjectedVolumeSource type for use with // apply. func ProjectedVolumeSource() *ProjectedVolumeSourceApplyConfiguration { return &ProjectedVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go index 646052ea4a..9a042a0a12 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// QuobyteVolumeSourceApplyConfiguration represents an declarative configuration of the QuobyteVolumeSource type for use +// QuobyteVolumeSourceApplyConfiguration represents a declarative configuration of the QuobyteVolumeSource type for use // with apply. 
type QuobyteVolumeSourceApplyConfiguration struct { Registry *string `json:"registry,omitempty"` @@ -29,7 +29,7 @@ type QuobyteVolumeSourceApplyConfiguration struct { Tenant *string `json:"tenant,omitempty"` } -// QuobyteVolumeSourceApplyConfiguration constructs an declarative configuration of the QuobyteVolumeSource type for use with +// QuobyteVolumeSourceApplyConfiguration constructs a declarative configuration of the QuobyteVolumeSource type for use with // apply. func QuobyteVolumeSource() *QuobyteVolumeSourceApplyConfiguration { return &QuobyteVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go index ffcb836eb0..64f25724a3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RBDPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the RBDPersistentVolumeSource type for use +// RBDPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the RBDPersistentVolumeSource type for use // with apply. type RBDPersistentVolumeSourceApplyConfiguration struct { CephMonitors []string `json:"monitors,omitempty"` @@ -31,7 +31,7 @@ type RBDPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// RBDPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the RBDPersistentVolumeSource type for use with +// RBDPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDPersistentVolumeSource type for use with // apply. func RBDPersistentVolumeSource() *RBDPersistentVolumeSourceApplyConfiguration { return &RBDPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go index 8e7c81732c..8dae198c09 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RBDVolumeSourceApplyConfiguration represents an declarative configuration of the RBDVolumeSource type for use +// RBDVolumeSourceApplyConfiguration represents a declarative configuration of the RBDVolumeSource type for use // with apply. type RBDVolumeSourceApplyConfiguration struct { CephMonitors []string `json:"monitors,omitempty"` @@ -31,7 +31,7 @@ type RBDVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// RBDVolumeSourceApplyConfiguration constructs an declarative configuration of the RBDVolumeSource type for use with +// RBDVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDVolumeSource type for use with // apply. 
func RBDVolumeSource() *RBDVolumeSourceApplyConfiguration { return &RBDVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go index 7cd71460a9..b28f422dc7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicationControllerApplyConfiguration represents an declarative configuration of the ReplicationController type for use +// ReplicationControllerApplyConfiguration represents a declarative configuration of the ReplicationController type for use // with apply. type ReplicationControllerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicationControllerApplyConfiguration struct { Status *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicationController constructs an declarative configuration of the ReplicationController type for use with +// ReplicationController constructs a declarative configuration of the ReplicationController type for use with // apply. func ReplicationController(name, namespace string) *ReplicationControllerApplyConfiguration { b := &ReplicationControllerApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ReplicationControllerApplyConfiguration) WithStatus(value *ReplicationC b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ReplicationControllerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go index c3d56cc697..0d74c1db9a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicationControllerConditionApplyConfiguration represents an declarative configuration of the ReplicationControllerCondition type for use +// ReplicationControllerConditionApplyConfiguration represents a declarative configuration of the ReplicationControllerCondition type for use // with apply. type ReplicationControllerConditionApplyConfiguration struct { Type *v1.ReplicationControllerConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type ReplicationControllerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicationControllerConditionApplyConfiguration constructs an declarative configuration of the ReplicationControllerCondition type for use with +// ReplicationControllerConditionApplyConfiguration constructs a declarative configuration of the ReplicationControllerCondition type for use with // apply. 
func ReplicationControllerCondition() *ReplicationControllerConditionApplyConfiguration { return &ReplicationControllerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go index dd4e081d9f..07bac9f4c9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ReplicationControllerSpecApplyConfiguration represents an declarative configuration of the ReplicationControllerSpec type for use +// ReplicationControllerSpecApplyConfiguration represents a declarative configuration of the ReplicationControllerSpec type for use // with apply. type ReplicationControllerSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -27,7 +27,7 @@ type ReplicationControllerSpecApplyConfiguration struct { Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicationControllerSpecApplyConfiguration constructs an declarative configuration of the ReplicationControllerSpec type for use with +// ReplicationControllerSpecApplyConfiguration constructs a declarative configuration of the ReplicationControllerSpec type for use with // apply. func ReplicationControllerSpec() *ReplicationControllerSpecApplyConfiguration { return &ReplicationControllerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go index 1b994cfb8c..c8046aa5a4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ReplicationControllerStatusApplyConfiguration represents an declarative configuration of the ReplicationControllerStatus type for use +// ReplicationControllerStatusApplyConfiguration represents a declarative configuration of the ReplicationControllerStatus type for use // with apply. type ReplicationControllerStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicationControllerStatusApplyConfiguration struct { Conditions []ReplicationControllerConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicationControllerStatusApplyConfiguration constructs an declarative configuration of the ReplicationControllerStatus type for use with +// ReplicationControllerStatusApplyConfiguration constructs a declarative configuration of the ReplicationControllerStatus type for use with // apply. func ReplicationControllerStatus() *ReplicationControllerStatusApplyConfiguration { return &ReplicationControllerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go index 064dd4e2e4..b00c692485 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go @@ -18,13 +18,14 @@ limitations under the License. 
package v1 -// ResourceClaimApplyConfiguration represents an declarative configuration of the ResourceClaim type for use +// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use // with apply. type ResourceClaimApplyConfiguration struct { - Name *string `json:"name,omitempty"` + Name *string `json:"name,omitempty"` + Request *string `json:"request,omitempty"` } -// ResourceClaimApplyConfiguration constructs an declarative configuration of the ResourceClaim type for use with +// ResourceClaimApplyConfiguration constructs a declarative configuration of the ResourceClaim type for use with // apply. func ResourceClaim() *ResourceClaimApplyConfiguration { return &ResourceClaimApplyConfiguration{} @@ -37,3 +38,11 @@ func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimA b.Name = &value return b } + +// WithRequest sets the Request field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Request field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithRequest(value string) *ResourceClaimApplyConfiguration { + b.Request = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go index 2741227dd7..1b4918a633 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceFieldSelectorApplyConfiguration represents an declarative configuration of the ResourceFieldSelector type for use +// ResourceFieldSelectorApplyConfiguration represents a declarative configuration of the ResourceFieldSelector type for use // with apply. type ResourceFieldSelectorApplyConfiguration struct { ContainerName *string `json:"containerName,omitempty"` @@ -30,7 +30,7 @@ type ResourceFieldSelectorApplyConfiguration struct { Divisor *resource.Quantity `json:"divisor,omitempty"` } -// ResourceFieldSelectorApplyConfiguration constructs an declarative configuration of the ResourceFieldSelector type for use with +// ResourceFieldSelectorApplyConfiguration constructs a declarative configuration of the ResourceFieldSelector type for use with // apply. func ResourceFieldSelector() *ResourceFieldSelectorApplyConfiguration { return &ResourceFieldSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go new file mode 100644 index 0000000000..5169cb4bc3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ResourceHealthApplyConfiguration represents a declarative configuration of the ResourceHealth type for use +// with apply. +type ResourceHealthApplyConfiguration struct { + ResourceID *v1.ResourceID `json:"resourceID,omitempty"` + Health *v1.ResourceHealthStatus `json:"health,omitempty"` +} + +// ResourceHealthApplyConfiguration constructs a declarative configuration of the ResourceHealth type for use with +// apply. +func ResourceHealth() *ResourceHealthApplyConfiguration { + return &ResourceHealthApplyConfiguration{} +} + +// WithResourceID sets the ResourceID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceID field is set to the value of the last call. +func (b *ResourceHealthApplyConfiguration) WithResourceID(value v1.ResourceID) *ResourceHealthApplyConfiguration { + b.ResourceID = &value + return b +} + +// WithHealth sets the Health field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Health field is set to the value of the last call. +func (b *ResourceHealthApplyConfiguration) WithHealth(value v1.ResourceHealthStatus) *ResourceHealthApplyConfiguration { + b.Health = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go index 6b22ebdc59..2b78ba7038 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceQuotaApplyConfiguration represents an declarative configuration of the ResourceQuota type for use +// ResourceQuotaApplyConfiguration represents a declarative configuration of the ResourceQuota type for use // with apply. type ResourceQuotaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ResourceQuotaApplyConfiguration struct { Status *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"` } -// ResourceQuota constructs an declarative configuration of the ResourceQuota type for use with +// ResourceQuota constructs a declarative configuration of the ResourceQuota type for use with // apply. func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration { b := &ResourceQuotaApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ResourceQuotaApplyConfiguration) WithStatus(value *ResourceQuotaStatusA b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ResourceQuotaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go index feb454bc4b..0012ace25f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceQuotaSpecApplyConfiguration represents an declarative configuration of the ResourceQuotaSpec type for use +// ResourceQuotaSpecApplyConfiguration represents a declarative configuration of the ResourceQuotaSpec type for use // with apply. type ResourceQuotaSpecApplyConfiguration struct { Hard *v1.ResourceList `json:"hard,omitempty"` @@ -30,7 +30,7 @@ type ResourceQuotaSpecApplyConfiguration struct { ScopeSelector *ScopeSelectorApplyConfiguration `json:"scopeSelector,omitempty"` } -// ResourceQuotaSpecApplyConfiguration constructs an declarative configuration of the ResourceQuotaSpec type for use with +// ResourceQuotaSpecApplyConfiguration constructs a declarative configuration of the ResourceQuotaSpec type for use with // apply. func ResourceQuotaSpec() *ResourceQuotaSpecApplyConfiguration { return &ResourceQuotaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go index 4dced90f7a..364b96eecf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceQuotaStatusApplyConfiguration represents an declarative configuration of the ResourceQuotaStatus type for use +// ResourceQuotaStatusApplyConfiguration represents a declarative configuration of the ResourceQuotaStatus type for use // with apply. type ResourceQuotaStatusApplyConfiguration struct { Hard *v1.ResourceList `json:"hard,omitempty"` Used *v1.ResourceList `json:"used,omitempty"` } -// ResourceQuotaStatusApplyConfiguration constructs an declarative configuration of the ResourceQuotaStatus type for use with +// ResourceQuotaStatusApplyConfiguration constructs a declarative configuration of the ResourceQuotaStatus type for use with // apply. func ResourceQuotaStatus() *ResourceQuotaStatusApplyConfiguration { return &ResourceQuotaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go index 9482b8d713..51197862c9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceRequirementsApplyConfiguration represents an declarative configuration of the ResourceRequirements type for use +// ResourceRequirementsApplyConfiguration represents a declarative configuration of the ResourceRequirements type for use // with apply. 
type ResourceRequirementsApplyConfiguration struct { Limits *v1.ResourceList `json:"limits,omitempty"` @@ -30,7 +30,7 @@ type ResourceRequirementsApplyConfiguration struct { Claims []ResourceClaimApplyConfiguration `json:"claims,omitempty"` } -// ResourceRequirementsApplyConfiguration constructs an declarative configuration of the ResourceRequirements type for use with +// ResourceRequirementsApplyConfiguration constructs a declarative configuration of the ResourceRequirements type for use with // apply. func ResourceRequirements() *ResourceRequirementsApplyConfiguration { return &ResourceRequirementsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesinstance.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go similarity index 51% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesinstance.go rename to vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go index 4f01372e4c..1e63c87f8c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesinstance.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go @@ -16,38 +16,42 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1 -// NamedResourcesInstanceApplyConfiguration represents an declarative configuration of the NamedResourcesInstance type for use +import ( + v1 "k8s.io/api/core/v1" +) + +// ResourceStatusApplyConfiguration represents a declarative configuration of the ResourceStatus type for use // with apply. -type NamedResourcesInstanceApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Attributes []NamedResourcesAttributeApplyConfiguration `json:"attributes,omitempty"` +type ResourceStatusApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Resources []ResourceHealthApplyConfiguration `json:"resources,omitempty"` } -// NamedResourcesInstanceApplyConfiguration constructs an declarative configuration of the NamedResourcesInstance type for use with +// ResourceStatusApplyConfiguration constructs a declarative configuration of the ResourceStatus type for use with // apply. -func NamedResourcesInstance() *NamedResourcesInstanceApplyConfiguration { - return &NamedResourcesInstanceApplyConfiguration{} +func ResourceStatus() *ResourceStatusApplyConfiguration { + return &ResourceStatusApplyConfiguration{} } // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *NamedResourcesInstanceApplyConfiguration) WithName(value string) *NamedResourcesInstanceApplyConfiguration { +func (b *ResourceStatusApplyConfiguration) WithName(value v1.ResourceName) *ResourceStatusApplyConfiguration { b.Name = &value return b } -// WithAttributes adds the given value to the Attributes field in the declarative configuration +// WithResources adds the given value to the Resources field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Attributes field. 
-func (b *NamedResourcesInstanceApplyConfiguration) WithAttributes(values ...*NamedResourcesAttributeApplyConfiguration) *NamedResourcesInstanceApplyConfiguration { +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *ResourceStatusApplyConfiguration) WithResources(values ...*ResourceHealthApplyConfiguration) *ResourceStatusApplyConfiguration { for i := range values { if values[i] == nil { - panic("nil value passed to WithAttributes") + panic("nil value passed to WithResources") } - b.Attributes = append(b.Attributes, *values[i]) + b.Resources = append(b.Resources, *values[i]) } return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go index fffb5b186d..b07f46de91 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ScaleIOPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the ScaleIOPersistentVolumeSource type for use +// ScaleIOPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOPersistentVolumeSource type for use // with apply. type ScaleIOPersistentVolumeSourceApplyConfiguration struct { Gateway *string `json:"gateway,omitempty"` @@ -33,7 +33,7 @@ type ScaleIOPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// ScaleIOPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the ScaleIOPersistentVolumeSource type for use with +// ScaleIOPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOPersistentVolumeSource type for use with // apply. func ScaleIOPersistentVolumeSource() *ScaleIOPersistentVolumeSourceApplyConfiguration { return &ScaleIOPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go index b54e1161eb..740c05ebb7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ScaleIOVolumeSourceApplyConfiguration represents an declarative configuration of the ScaleIOVolumeSource type for use +// ScaleIOVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOVolumeSource type for use // with apply. type ScaleIOVolumeSourceApplyConfiguration struct { Gateway *string `json:"gateway,omitempty"` @@ -33,7 +33,7 @@ type ScaleIOVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// ScaleIOVolumeSourceApplyConfiguration constructs an declarative configuration of the ScaleIOVolumeSource type for use with +// ScaleIOVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOVolumeSource type for use with // apply. 
func ScaleIOVolumeSource() *ScaleIOVolumeSourceApplyConfiguration { return &ScaleIOVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go index c901a2ae6d..c6ec87827f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ScopedResourceSelectorRequirementApplyConfiguration represents an declarative configuration of the ScopedResourceSelectorRequirement type for use +// ScopedResourceSelectorRequirementApplyConfiguration represents a declarative configuration of the ScopedResourceSelectorRequirement type for use // with apply. type ScopedResourceSelectorRequirementApplyConfiguration struct { ScopeName *v1.ResourceQuotaScope `json:"scopeName,omitempty"` @@ -30,7 +30,7 @@ type ScopedResourceSelectorRequirementApplyConfiguration struct { Values []string `json:"values,omitempty"` } -// ScopedResourceSelectorRequirementApplyConfiguration constructs an declarative configuration of the ScopedResourceSelectorRequirement type for use with +// ScopedResourceSelectorRequirementApplyConfiguration constructs a declarative configuration of the ScopedResourceSelectorRequirement type for use with // apply. func ScopedResourceSelectorRequirement() *ScopedResourceSelectorRequirementApplyConfiguration { return &ScopedResourceSelectorRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go index 3251e9dc18..a9fb9a1b19 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ScopeSelectorApplyConfiguration represents an declarative configuration of the ScopeSelector type for use +// ScopeSelectorApplyConfiguration represents a declarative configuration of the ScopeSelector type for use // with apply. type ScopeSelectorApplyConfiguration struct { MatchExpressions []ScopedResourceSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` } -// ScopeSelectorApplyConfiguration constructs an declarative configuration of the ScopeSelector type for use with +// ScopeSelectorApplyConfiguration constructs a declarative configuration of the ScopeSelector type for use with // apply. func ScopeSelector() *ScopeSelectorApplyConfiguration { return &ScopeSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go index 9818a00e7a..eb3077a051 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// SeccompProfileApplyConfiguration represents an declarative configuration of the SeccompProfile type for use +// SeccompProfileApplyConfiguration represents a declarative configuration of the SeccompProfile type for use // with apply. 
type SeccompProfileApplyConfiguration struct { Type *v1.SeccompProfileType `json:"type,omitempty"` LocalhostProfile *string `json:"localhostProfile,omitempty"` } -// SeccompProfileApplyConfiguration constructs an declarative configuration of the SeccompProfile type for use with +// SeccompProfileApplyConfiguration constructs a declarative configuration of the SeccompProfile type for use with // apply. func SeccompProfile() *SeccompProfileApplyConfiguration { return &SeccompProfileApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go index 3f7e1eb039..1d850b00bb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// SecretApplyConfiguration represents an declarative configuration of the Secret type for use +// SecretApplyConfiguration represents a declarative configuration of the Secret type for use // with apply. type SecretApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -38,7 +38,7 @@ type SecretApplyConfiguration struct { Type *corev1.SecretType `json:"type,omitempty"` } -// Secret constructs an declarative configuration of the Secret type for use with +// Secret constructs a declarative configuration of the Secret type for use with // apply. func Secret(name, namespace string) *SecretApplyConfiguration { b := &SecretApplyConfiguration{} @@ -286,3 +286,9 @@ func (b *SecretApplyConfiguration) WithType(value corev1.SecretType) *SecretAppl b.Type = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *SecretApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go index 7b22a8d0b2..ba99b7f5fd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SecretEnvSourceApplyConfiguration represents an declarative configuration of the SecretEnvSource type for use +// SecretEnvSourceApplyConfiguration represents a declarative configuration of the SecretEnvSource type for use // with apply. type SecretEnvSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` Optional *bool `json:"optional,omitempty"` } -// SecretEnvSourceApplyConfiguration constructs an declarative configuration of the SecretEnvSource type for use with +// SecretEnvSourceApplyConfiguration constructs a declarative configuration of the SecretEnvSource type for use with // apply. func SecretEnvSource() *SecretEnvSourceApplyConfiguration { return &SecretEnvSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go index b8464a348a..2d490b8108 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// SecretKeySelectorApplyConfiguration represents an declarative configuration of the SecretKeySelector type for use +// SecretKeySelectorApplyConfiguration represents a declarative configuration of the SecretKeySelector type for use // with apply. type SecretKeySelectorApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type SecretKeySelectorApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretKeySelectorApplyConfiguration constructs an declarative configuration of the SecretKeySelector type for use with +// SecretKeySelectorApplyConfiguration constructs a declarative configuration of the SecretKeySelector type for use with // apply. func SecretKeySelector() *SecretKeySelectorApplyConfiguration { return &SecretKeySelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go index e8edc61273..65ce3c66da 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SecretProjectionApplyConfiguration represents an declarative configuration of the SecretProjection type for use +// SecretProjectionApplyConfiguration represents a declarative configuration of the SecretProjection type for use // with apply. type SecretProjectionApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type SecretProjectionApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretProjectionApplyConfiguration constructs an declarative configuration of the SecretProjection type for use with +// SecretProjectionApplyConfiguration constructs a declarative configuration of the SecretProjection type for use with // apply. func SecretProjection() *SecretProjectionApplyConfiguration { return &SecretProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go index 95579d003e..f5e0de23aa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SecretReferenceApplyConfiguration represents an declarative configuration of the SecretReference type for use +// SecretReferenceApplyConfiguration represents a declarative configuration of the SecretReference type for use // with apply. type SecretReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` Namespace *string `json:"namespace,omitempty"` } -// SecretReferenceApplyConfiguration constructs an declarative configuration of the SecretReference type for use with +// SecretReferenceApplyConfiguration constructs a declarative configuration of the SecretReference type for use with // apply. 
func SecretReference() *SecretReferenceApplyConfiguration { return &SecretReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go index bcb441e9f3..9f765d354d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SecretVolumeSourceApplyConfiguration represents an declarative configuration of the SecretVolumeSource type for use +// SecretVolumeSourceApplyConfiguration represents a declarative configuration of the SecretVolumeSource type for use // with apply. type SecretVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -27,7 +27,7 @@ type SecretVolumeSourceApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretVolumeSourceApplyConfiguration constructs an declarative configuration of the SecretVolumeSource type for use with +// SecretVolumeSourceApplyConfiguration constructs a declarative configuration of the SecretVolumeSource type for use with // apply. func SecretVolumeSource() *SecretVolumeSourceApplyConfiguration { return &SecretVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go index 4146b765da..99faab72da 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// SecurityContextApplyConfiguration represents an declarative configuration of the SecurityContext type for use +// SecurityContextApplyConfiguration represents a declarative configuration of the SecurityContext type for use // with apply. type SecurityContextApplyConfiguration struct { Capabilities *CapabilitiesApplyConfiguration `json:"capabilities,omitempty"` @@ -39,7 +39,7 @@ type SecurityContextApplyConfiguration struct { AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"` } -// SecurityContextApplyConfiguration constructs an declarative configuration of the SecurityContext type for use with +// SecurityContextApplyConfiguration constructs a declarative configuration of the SecurityContext type for use with // apply. func SecurityContext() *SecurityContextApplyConfiguration { return &SecurityContextApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go index 2938faa18e..bad01300f0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SELinuxOptionsApplyConfiguration represents an declarative configuration of the SELinuxOptions type for use +// SELinuxOptionsApplyConfiguration represents a declarative configuration of the SELinuxOptions type for use // with apply. 
type SELinuxOptionsApplyConfiguration struct { User *string `json:"user,omitempty"` @@ -27,7 +27,7 @@ type SELinuxOptionsApplyConfiguration struct { Level *string `json:"level,omitempty"` } -// SELinuxOptionsApplyConfiguration constructs an declarative configuration of the SELinuxOptions type for use with +// SELinuxOptionsApplyConfiguration constructs a declarative configuration of the SELinuxOptions type for use with // apply. func SELinuxOptions() *SELinuxOptionsApplyConfiguration { return &SELinuxOptionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go index 3fa1195237..2dac0589d2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceApplyConfiguration represents an declarative configuration of the Service type for use +// ServiceApplyConfiguration represents a declarative configuration of the Service type for use // with apply. type ServiceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ServiceApplyConfiguration struct { Status *ServiceStatusApplyConfiguration `json:"status,omitempty"` } -// Service constructs an declarative configuration of the Service type for use with +// Service constructs a declarative configuration of the Service type for use with // apply. func Service(name, namespace string) *ServiceApplyConfiguration { b := &ServiceApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ServiceApplyConfiguration) WithStatus(value *ServiceStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go index 53a8193750..26d33deb95 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceAccountApplyConfiguration represents an declarative configuration of the ServiceAccount type for use +// ServiceAccountApplyConfiguration represents a declarative configuration of the ServiceAccount type for use // with apply. type ServiceAccountApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ServiceAccountApplyConfiguration struct { AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` } -// ServiceAccount constructs an declarative configuration of the ServiceAccount type for use with +// ServiceAccount constructs a declarative configuration of the ServiceAccount type for use with // apply. func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration { b := &ServiceAccountApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *ServiceAccountApplyConfiguration) WithAutomountServiceAccountToken(valu b.AutomountServiceAccountToken = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ServiceAccountApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go index a52fad7d8d..fab81bf8a2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ServiceAccountTokenProjectionApplyConfiguration represents an declarative configuration of the ServiceAccountTokenProjection type for use +// ServiceAccountTokenProjectionApplyConfiguration represents a declarative configuration of the ServiceAccountTokenProjection type for use // with apply. type ServiceAccountTokenProjectionApplyConfiguration struct { Audience *string `json:"audience,omitempty"` @@ -26,7 +26,7 @@ type ServiceAccountTokenProjectionApplyConfiguration struct { Path *string `json:"path,omitempty"` } -// ServiceAccountTokenProjectionApplyConfiguration constructs an declarative configuration of the ServiceAccountTokenProjection type for use with +// ServiceAccountTokenProjectionApplyConfiguration constructs a declarative configuration of the ServiceAccountTokenProjection type for use with // apply. func ServiceAccountTokenProjection() *ServiceAccountTokenProjectionApplyConfiguration { return &ServiceAccountTokenProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go index 8bc63bd950..e889f21345 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// ServicePortApplyConfiguration represents an declarative configuration of the ServicePort type for use +// ServicePortApplyConfiguration represents a declarative configuration of the ServicePort type for use // with apply. type ServicePortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -34,7 +34,7 @@ type ServicePortApplyConfiguration struct { NodePort *int32 `json:"nodePort,omitempty"` } -// ServicePortApplyConfiguration constructs an declarative configuration of the ServicePort type for use with +// ServicePortApplyConfiguration constructs a declarative configuration of the ServicePort type for use with // apply. func ServicePort() *ServicePortApplyConfiguration { return &ServicePortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go index 5cfbcb700f..41367dce4f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ServiceSpecApplyConfiguration represents an declarative configuration of the ServiceSpec type for use +// ServiceSpecApplyConfiguration represents a declarative configuration of the ServiceSpec type for use // with apply. 
type ServiceSpecApplyConfiguration struct { Ports []ServicePortApplyConfiguration `json:"ports,omitempty"` @@ -47,7 +47,7 @@ type ServiceSpecApplyConfiguration struct { TrafficDistribution *string `json:"trafficDistribution,omitempty"` } -// ServiceSpecApplyConfiguration constructs an declarative configuration of the ServiceSpec type for use with +// ServiceSpecApplyConfiguration constructs a declarative configuration of the ServiceSpec type for use with // apply. func ServiceSpec() *ServiceSpecApplyConfiguration { return &ServiceSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go index 2347cec678..11c3f8a80a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go @@ -22,14 +22,14 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceStatusApplyConfiguration represents an declarative configuration of the ServiceStatus type for use +// ServiceStatusApplyConfiguration represents a declarative configuration of the ServiceStatus type for use // with apply. type ServiceStatusApplyConfiguration struct { LoadBalancer *LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ServiceStatusApplyConfiguration constructs an declarative configuration of the ServiceStatus type for use with +// ServiceStatusApplyConfiguration constructs a declarative configuration of the ServiceStatus type for use with // apply. func ServiceStatus() *ServiceStatusApplyConfiguration { return &ServiceStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go index 7016f836a1..13b045fffc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// SessionAffinityConfigApplyConfiguration represents an declarative configuration of the SessionAffinityConfig type for use +// SessionAffinityConfigApplyConfiguration represents a declarative configuration of the SessionAffinityConfig type for use // with apply. type SessionAffinityConfigApplyConfiguration struct { ClientIP *ClientIPConfigApplyConfiguration `json:"clientIP,omitempty"` } -// SessionAffinityConfigApplyConfiguration constructs an declarative configuration of the SessionAffinityConfig type for use with +// SessionAffinityConfigApplyConfiguration constructs a declarative configuration of the SessionAffinityConfig type for use with // apply. func SessionAffinityConfig() *SessionAffinityConfigApplyConfiguration { return &SessionAffinityConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go index 8b3284536a..b4115609b1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1 -// SleepActionApplyConfiguration represents an declarative configuration of the SleepAction type for use +// SleepActionApplyConfiguration represents a declarative configuration of the SleepAction type for use // with apply. type SleepActionApplyConfiguration struct { Seconds *int64 `json:"seconds,omitempty"` } -// SleepActionApplyConfiguration constructs an declarative configuration of the SleepAction type for use with +// SleepActionApplyConfiguration constructs a declarative configuration of the SleepAction type for use with // apply. func SleepAction() *SleepActionApplyConfiguration { return &SleepActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go index 00ed39ccb0..7381a498e1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// StorageOSPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the StorageOSPersistentVolumeSource type for use +// StorageOSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSPersistentVolumeSource type for use // with apply. type StorageOSPersistentVolumeSourceApplyConfiguration struct { VolumeName *string `json:"volumeName,omitempty"` @@ -28,7 +28,7 @@ type StorageOSPersistentVolumeSourceApplyConfiguration struct { SecretRef *ObjectReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// StorageOSPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the StorageOSPersistentVolumeSource type for use with +// StorageOSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSPersistentVolumeSource type for use with // apply. func StorageOSPersistentVolumeSource() *StorageOSPersistentVolumeSourceApplyConfiguration { return &StorageOSPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go index 7f3b810cf6..81d9373c19 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// StorageOSVolumeSourceApplyConfiguration represents an declarative configuration of the StorageOSVolumeSource type for use +// StorageOSVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSVolumeSource type for use // with apply. type StorageOSVolumeSourceApplyConfiguration struct { VolumeName *string `json:"volumeName,omitempty"` @@ -28,7 +28,7 @@ type StorageOSVolumeSourceApplyConfiguration struct { SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// StorageOSVolumeSourceApplyConfiguration constructs an declarative configuration of the StorageOSVolumeSource type for use with +// StorageOSVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSVolumeSource type for use with // apply. 
func StorageOSVolumeSource() *StorageOSVolumeSourceApplyConfiguration { return &StorageOSVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go index deab9e0b38..7719eb7d60 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SysctlApplyConfiguration represents an declarative configuration of the Sysctl type for use +// SysctlApplyConfiguration represents a declarative configuration of the Sysctl type for use // with apply. type SysctlApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// SysctlApplyConfiguration constructs an declarative configuration of the Sysctl type for use with +// SysctlApplyConfiguration constructs a declarative configuration of the Sysctl type for use with // apply. func Sysctl() *SysctlApplyConfiguration { return &SysctlApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go index 4672b87427..a34fb05526 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TaintApplyConfiguration represents an declarative configuration of the Taint type for use +// TaintApplyConfiguration represents a declarative configuration of the Taint type for use // with apply. type TaintApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -32,7 +32,7 @@ type TaintApplyConfiguration struct { TimeAdded *metav1.Time `json:"timeAdded,omitempty"` } -// TaintApplyConfiguration constructs an declarative configuration of the Taint type for use with +// TaintApplyConfiguration constructs a declarative configuration of the Taint type for use with // apply. func Taint() *TaintApplyConfiguration { return &TaintApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go index bd038fc3ae..cba1a7d081 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// TCPSocketActionApplyConfiguration represents an declarative configuration of the TCPSocketAction type for use +// TCPSocketActionApplyConfiguration represents a declarative configuration of the TCPSocketAction type for use // with apply. type TCPSocketActionApplyConfiguration struct { Port *intstr.IntOrString `json:"port,omitempty"` Host *string `json:"host,omitempty"` } -// TCPSocketActionApplyConfiguration constructs an declarative configuration of the TCPSocketAction type for use with +// TCPSocketActionApplyConfiguration constructs a declarative configuration of the TCPSocketAction type for use with // apply. 
func TCPSocketAction() *TCPSocketActionApplyConfiguration { return &TCPSocketActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go index 1a92a8c668..1bcc85b65f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// TolerationApplyConfiguration represents an declarative configuration of the Toleration type for use +// TolerationApplyConfiguration represents a declarative configuration of the Toleration type for use // with apply. type TolerationApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -32,7 +32,7 @@ type TolerationApplyConfiguration struct { TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"` } -// TolerationApplyConfiguration constructs an declarative configuration of the Toleration type for use with +// TolerationApplyConfiguration constructs a declarative configuration of the Toleration type for use with // apply. func Toleration() *TolerationApplyConfiguration { return &TolerationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go index 9581490de2..674ddec93c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// TopologySelectorLabelRequirementApplyConfiguration represents an declarative configuration of the TopologySelectorLabelRequirement type for use +// TopologySelectorLabelRequirementApplyConfiguration represents a declarative configuration of the TopologySelectorLabelRequirement type for use // with apply. type TopologySelectorLabelRequirementApplyConfiguration struct { Key *string `json:"key,omitempty"` Values []string `json:"values,omitempty"` } -// TopologySelectorLabelRequirementApplyConfiguration constructs an declarative configuration of the TopologySelectorLabelRequirement type for use with +// TopologySelectorLabelRequirementApplyConfiguration constructs a declarative configuration of the TopologySelectorLabelRequirement type for use with // apply. func TopologySelectorLabelRequirement() *TopologySelectorLabelRequirementApplyConfiguration { return &TopologySelectorLabelRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go index a025b8a2a8..7812ae5204 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// TopologySelectorTermApplyConfiguration represents an declarative configuration of the TopologySelectorTerm type for use +// TopologySelectorTermApplyConfiguration represents a declarative configuration of the TopologySelectorTerm type for use // with apply. 
type TopologySelectorTermApplyConfiguration struct { MatchLabelExpressions []TopologySelectorLabelRequirementApplyConfiguration `json:"matchLabelExpressions,omitempty"` } -// TopologySelectorTermApplyConfiguration constructs an declarative configuration of the TopologySelectorTerm type for use with +// TopologySelectorTermApplyConfiguration constructs a declarative configuration of the TopologySelectorTerm type for use with // apply. func TopologySelectorTerm() *TopologySelectorTermApplyConfiguration { return &TopologySelectorTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go index fbfa8fa886..b21d233513 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// TopologySpreadConstraintApplyConfiguration represents an declarative configuration of the TopologySpreadConstraint type for use +// TopologySpreadConstraintApplyConfiguration represents a declarative configuration of the TopologySpreadConstraint type for use // with apply. type TopologySpreadConstraintApplyConfiguration struct { MaxSkew *int32 `json:"maxSkew,omitempty"` @@ -36,7 +36,7 @@ type TopologySpreadConstraintApplyConfiguration struct { MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` } -// TopologySpreadConstraintApplyConfiguration constructs an declarative configuration of the TopologySpreadConstraint type for use with +// TopologySpreadConstraintApplyConfiguration constructs a declarative configuration of the TopologySpreadConstraint type for use with // apply. func TopologySpreadConstraint() *TopologySpreadConstraintApplyConfiguration { return &TopologySpreadConstraintApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go index cdc2eb7d34..1e63b79889 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// TypedLocalObjectReferenceApplyConfiguration represents an declarative configuration of the TypedLocalObjectReference type for use +// TypedLocalObjectReferenceApplyConfiguration represents a declarative configuration of the TypedLocalObjectReference type for use // with apply. type TypedLocalObjectReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type TypedLocalObjectReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// TypedLocalObjectReferenceApplyConfiguration constructs an declarative configuration of the TypedLocalObjectReference type for use with +// TypedLocalObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedLocalObjectReference type for use with // apply. 
func TypedLocalObjectReference() *TypedLocalObjectReferenceApplyConfiguration { return &TypedLocalObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go index d9a01c9c3a..f07de8902e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// TypedObjectReferenceApplyConfiguration represents an declarative configuration of the TypedObjectReference type for use +// TypedObjectReferenceApplyConfiguration represents a declarative configuration of the TypedObjectReference type for use // with apply. type TypedObjectReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -27,7 +27,7 @@ type TypedObjectReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// TypedObjectReferenceApplyConfiguration constructs an declarative configuration of the TypedObjectReference type for use with +// TypedObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedObjectReference type for use with // apply. func TypedObjectReference() *TypedObjectReferenceApplyConfiguration { return &TypedObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go index db0686bce7..9a48f83497 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// VolumeApplyConfiguration represents an declarative configuration of the Volume type for use +// VolumeApplyConfiguration represents a declarative configuration of the Volume type for use // with apply. type VolumeApplyConfiguration struct { Name *string `json:"name,omitempty"` VolumeSourceApplyConfiguration `json:",inline"` } -// VolumeApplyConfiguration constructs an declarative configuration of the Volume type for use with +// VolumeApplyConfiguration constructs a declarative configuration of the Volume type for use with // apply. func Volume() *VolumeApplyConfiguration { return &VolumeApplyConfiguration{} @@ -270,3 +270,11 @@ func (b *VolumeApplyConfiguration) WithEphemeral(value *EphemeralVolumeSourceApp b.Ephemeral = value return b } + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *VolumeApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { + b.Image = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go index ea18ca8d9e..0bc52aad2a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// VolumeDeviceApplyConfiguration represents an declarative configuration of the VolumeDevice type for use +// VolumeDeviceApplyConfiguration represents a declarative configuration of the VolumeDevice type for use // with apply. type VolumeDeviceApplyConfiguration struct { Name *string `json:"name,omitempty"` DevicePath *string `json:"devicePath,omitempty"` } -// VolumeDeviceApplyConfiguration constructs an declarative configuration of the VolumeDevice type for use with +// VolumeDeviceApplyConfiguration constructs a declarative configuration of the VolumeDevice type for use with // apply. func VolumeDevice() *VolumeDeviceApplyConfiguration { return &VolumeDeviceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go index 358658350e..49f22cc4e7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// VolumeMountApplyConfiguration represents an declarative configuration of the VolumeMount type for use +// VolumeMountApplyConfiguration represents a declarative configuration of the VolumeMount type for use // with apply. type VolumeMountApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -34,7 +34,7 @@ type VolumeMountApplyConfiguration struct { SubPathExpr *string `json:"subPathExpr,omitempty"` } -// VolumeMountApplyConfiguration constructs an declarative configuration of the VolumeMount type for use with +// VolumeMountApplyConfiguration constructs a declarative configuration of the VolumeMount type for use with // apply. func VolumeMount() *VolumeMountApplyConfiguration { return &VolumeMountApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go index c3d187fdfa..a0a9b5401c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// VolumeMountStatusApplyConfiguration represents an declarative configuration of the VolumeMountStatus type for use +// VolumeMountStatusApplyConfiguration represents a declarative configuration of the VolumeMountStatus type for use // with apply. type VolumeMountStatusApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -31,7 +31,7 @@ type VolumeMountStatusApplyConfiguration struct { RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` } -// VolumeMountStatusApplyConfiguration constructs an declarative configuration of the VolumeMountStatus type for use with +// VolumeMountStatusApplyConfiguration constructs a declarative configuration of the VolumeMountStatus type for use with // apply. func VolumeMountStatus() *VolumeMountStatusApplyConfiguration { return &VolumeMountStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go index 32bfd82928..9198c25dc8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1 -// VolumeNodeAffinityApplyConfiguration represents an declarative configuration of the VolumeNodeAffinity type for use +// VolumeNodeAffinityApplyConfiguration represents a declarative configuration of the VolumeNodeAffinity type for use // with apply. type VolumeNodeAffinityApplyConfiguration struct { Required *NodeSelectorApplyConfiguration `json:"required,omitempty"` } -// VolumeNodeAffinityApplyConfiguration constructs an declarative configuration of the VolumeNodeAffinity type for use with +// VolumeNodeAffinityApplyConfiguration constructs a declarative configuration of the VolumeNodeAffinity type for use with // apply. func VolumeNodeAffinity() *VolumeNodeAffinityApplyConfiguration { return &VolumeNodeAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go index a2ef0a9943..c14e9fe697 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeProjectionApplyConfiguration represents an declarative configuration of the VolumeProjection type for use +// VolumeProjectionApplyConfiguration represents a declarative configuration of the VolumeProjection type for use // with apply. type VolumeProjectionApplyConfiguration struct { Secret *SecretProjectionApplyConfiguration `json:"secret,omitempty"` @@ -28,7 +28,7 @@ type VolumeProjectionApplyConfiguration struct { ClusterTrustBundle *ClusterTrustBundleProjectionApplyConfiguration `json:"clusterTrustBundle,omitempty"` } -// VolumeProjectionApplyConfiguration constructs an declarative configuration of the VolumeProjection type for use with +// VolumeProjectionApplyConfiguration constructs a declarative configuration of the VolumeProjection type for use with // apply. func VolumeProjection() *VolumeProjectionApplyConfiguration { return &VolumeProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go index 89ad1da8b3..ae849f7741 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// VolumeResourceRequirementsApplyConfiguration represents an declarative configuration of the VolumeResourceRequirements type for use +// VolumeResourceRequirementsApplyConfiguration represents a declarative configuration of the VolumeResourceRequirements type for use // with apply. type VolumeResourceRequirementsApplyConfiguration struct { Limits *v1.ResourceList `json:"limits,omitempty"` Requests *v1.ResourceList `json:"requests,omitempty"` } -// VolumeResourceRequirementsApplyConfiguration constructs an declarative configuration of the VolumeResourceRequirements type for use with +// VolumeResourceRequirementsApplyConfiguration constructs a declarative configuration of the VolumeResourceRequirements type for use with // apply. 
func VolumeResourceRequirements() *VolumeResourceRequirementsApplyConfiguration { return &VolumeResourceRequirementsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go index 4a8d316dd5..aeead953cf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeSourceApplyConfiguration represents an declarative configuration of the VolumeSource type for use +// VolumeSourceApplyConfiguration represents a declarative configuration of the VolumeSource type for use // with apply. type VolumeSourceApplyConfiguration struct { HostPath *HostPathVolumeSourceApplyConfiguration `json:"hostPath,omitempty"` @@ -50,9 +50,10 @@ type VolumeSourceApplyConfiguration struct { StorageOS *StorageOSVolumeSourceApplyConfiguration `json:"storageos,omitempty"` CSI *CSIVolumeSourceApplyConfiguration `json:"csi,omitempty"` Ephemeral *EphemeralVolumeSourceApplyConfiguration `json:"ephemeral,omitempty"` + Image *ImageVolumeSourceApplyConfiguration `json:"image,omitempty"` } -// VolumeSourceApplyConfiguration constructs an declarative configuration of the VolumeSource type for use with +// VolumeSourceApplyConfiguration constructs a declarative configuration of the VolumeSource type for use with // apply. func VolumeSource() *VolumeSourceApplyConfiguration { return &VolumeSourceApplyConfiguration{} @@ -289,3 +290,11 @@ func (b *VolumeSourceApplyConfiguration) WithEphemeral(value *EphemeralVolumeSou b.Ephemeral = value return b } + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *VolumeSourceApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeSourceApplyConfiguration { + b.Image = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go index ff3e3e27d9..ea8fd8d62e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VsphereVirtualDiskVolumeSourceApplyConfiguration represents an declarative configuration of the VsphereVirtualDiskVolumeSource type for use +// VsphereVirtualDiskVolumeSourceApplyConfiguration represents a declarative configuration of the VsphereVirtualDiskVolumeSource type for use // with apply. type VsphereVirtualDiskVolumeSourceApplyConfiguration struct { VolumePath *string `json:"volumePath,omitempty"` @@ -27,7 +27,7 @@ type VsphereVirtualDiskVolumeSourceApplyConfiguration struct { StoragePolicyID *string `json:"storagePolicyID,omitempty"` } -// VsphereVirtualDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the VsphereVirtualDiskVolumeSource type for use with +// VsphereVirtualDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the VsphereVirtualDiskVolumeSource type for use with // apply. 
func VsphereVirtualDiskVolumeSource() *VsphereVirtualDiskVolumeSourceApplyConfiguration { return &VsphereVirtualDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go index eb99d06ffa..c49ef93eb4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// WeightedPodAffinityTermApplyConfiguration represents an declarative configuration of the WeightedPodAffinityTerm type for use +// WeightedPodAffinityTermApplyConfiguration represents a declarative configuration of the WeightedPodAffinityTerm type for use // with apply. type WeightedPodAffinityTermApplyConfiguration struct { Weight *int32 `json:"weight,omitempty"` PodAffinityTerm *PodAffinityTermApplyConfiguration `json:"podAffinityTerm,omitempty"` } -// WeightedPodAffinityTermApplyConfiguration constructs an declarative configuration of the WeightedPodAffinityTerm type for use with +// WeightedPodAffinityTermApplyConfiguration constructs a declarative configuration of the WeightedPodAffinityTerm type for use with // apply. func WeightedPodAffinityTerm() *WeightedPodAffinityTermApplyConfiguration { return &WeightedPodAffinityTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go index 20692e0146..bb37a500b4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// WindowsSecurityContextOptionsApplyConfiguration represents an declarative configuration of the WindowsSecurityContextOptions type for use +// WindowsSecurityContextOptionsApplyConfiguration represents a declarative configuration of the WindowsSecurityContextOptions type for use // with apply. type WindowsSecurityContextOptionsApplyConfiguration struct { GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty"` @@ -27,7 +27,7 @@ type WindowsSecurityContextOptionsApplyConfiguration struct { HostProcess *bool `json:"hostProcess,omitempty"` } -// WindowsSecurityContextOptionsApplyConfiguration constructs an declarative configuration of the WindowsSecurityContextOptions type for use with +// WindowsSecurityContextOptionsApplyConfiguration constructs a declarative configuration of the WindowsSecurityContextOptions type for use with // apply. 
func WindowsSecurityContextOptions() *WindowsSecurityContextOptionsApplyConfiguration { return &WindowsSecurityContextOptionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go index d8c2359a3b..df45a6fb8a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// EndpointApplyConfiguration represents an declarative configuration of the Endpoint type for use +// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use // with apply. type EndpointApplyConfiguration struct { Addresses []string `json:"addresses,omitempty"` @@ -35,7 +35,7 @@ type EndpointApplyConfiguration struct { Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"` } -// EndpointApplyConfiguration constructs an declarative configuration of the Endpoint type for use with +// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with // apply. func Endpoint() *EndpointApplyConfiguration { return &EndpointApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go index 68c25dd57c..20f0b97124 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointConditionsApplyConfiguration represents an declarative configuration of the EndpointConditions type for use +// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use // with apply. type EndpointConditionsApplyConfiguration struct { Ready *bool `json:"ready,omitempty"` @@ -26,7 +26,7 @@ type EndpointConditionsApplyConfiguration struct { Terminating *bool `json:"terminating,omitempty"` } -// EndpointConditionsApplyConfiguration constructs an declarative configuration of the EndpointConditions type for use with +// EndpointConditionsApplyConfiguration constructs a declarative configuration of the EndpointConditions type for use with // apply. func EndpointConditions() *EndpointConditionsApplyConfiguration { return &EndpointConditionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go index 6eb9f21a51..d2d0f67769 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// EndpointHintsApplyConfiguration represents an declarative configuration of the EndpointHints type for use +// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use // with apply. 
type EndpointHintsApplyConfiguration struct { ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"` } -// EndpointHintsApplyConfiguration constructs an declarative configuration of the EndpointHints type for use with +// EndpointHintsApplyConfiguration constructs a declarative configuration of the EndpointHints type for use with // apply. func EndpointHints() *EndpointHintsApplyConfiguration { return &EndpointHintsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go index c712956009..12908deb61 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct { AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go index 640613753d..97002d2bbb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointSliceApplyConfiguration represents an declarative configuration of the EndpointSlice type for use +// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use // with apply. type EndpointSliceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type EndpointSliceApplyConfiguration struct { Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSlice constructs an declarative configuration of the EndpointSlice type for use with +// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with // apply. func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { b := &EndpointSliceApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EndpointSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go index 192a5ad2e8..505d11ae2f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1 -// ForZoneApplyConfiguration represents an declarative configuration of the ForZone type for use +// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use // with apply. type ForZoneApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ForZoneApplyConfiguration constructs an declarative configuration of the ForZone type for use with +// ForZoneApplyConfiguration constructs a declarative configuration of the ForZone type for use with // apply. func ForZone() *ForZoneApplyConfiguration { return &ForZoneApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go index 724c2d007c..5d87dae72e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// EndpointApplyConfiguration represents an declarative configuration of the Endpoint type for use +// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use // with apply. type EndpointApplyConfiguration struct { Addresses []string `json:"addresses,omitempty"` @@ -34,7 +34,7 @@ type EndpointApplyConfiguration struct { Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"` } -// EndpointApplyConfiguration constructs an declarative configuration of the Endpoint type for use with +// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with // apply. func Endpoint() *EndpointApplyConfiguration { return &EndpointApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go index bc0438f90b..13f5fa5575 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// EndpointConditionsApplyConfiguration represents an declarative configuration of the EndpointConditions type for use +// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use // with apply. type EndpointConditionsApplyConfiguration struct { Ready *bool `json:"ready,omitempty"` @@ -26,7 +26,7 @@ type EndpointConditionsApplyConfiguration struct { Terminating *bool `json:"terminating,omitempty"` } -// EndpointConditionsApplyConfiguration constructs an declarative configuration of the EndpointConditions type for use with +// EndpointConditionsApplyConfiguration constructs a declarative configuration of the EndpointConditions type for use with // apply. func EndpointConditions() *EndpointConditionsApplyConfiguration { return &EndpointConditionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go index 41d80206b3..99f69027a8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1beta1 -// EndpointHintsApplyConfiguration represents an declarative configuration of the EndpointHints type for use +// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use // with apply. type EndpointHintsApplyConfiguration struct { ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"` } -// EndpointHintsApplyConfiguration constructs an declarative configuration of the EndpointHints type for use with +// EndpointHintsApplyConfiguration constructs a declarative configuration of the EndpointHints type for use with // apply. func EndpointHints() *EndpointHintsApplyConfiguration { return &EndpointHintsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go index 9a3a31b965..07cfc684b2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct { AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go index 74a24773cc..888319bc0f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointSliceApplyConfiguration represents an declarative configuration of the EndpointSlice type for use +// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use // with apply. type EndpointSliceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type EndpointSliceApplyConfiguration struct { Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSlice constructs an declarative configuration of the EndpointSlice type for use with +// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with // apply. func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { b := &EndpointSliceApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *EndpointSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go index 4d1455ed38..4af09cc49b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// ForZoneApplyConfiguration represents an declarative configuration of the ForZone type for use +// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use // with apply. type ForZoneApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ForZoneApplyConfiguration constructs an declarative configuration of the ForZone type for use with +// ForZoneApplyConfiguration constructs a declarative configuration of the ForZone type for use with // apply. func ForZone() *ForZoneApplyConfiguration { return &ForZoneApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/doc.go b/vendor/k8s.io/client-go/applyconfigurations/doc.go new file mode 100644 index 0000000000..ac426c6075 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/doc.go @@ -0,0 +1,151 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package applyconfigurations provides typesafe go representations of the apply +configurations that are used to construct Server-side Apply requests. + +# Basics + +The Apply functions in the typed client (see the k8s.io/client-go/kubernetes/typed packages) offer +a direct and typesafe way of calling Server-side Apply. Each Apply function takes an "apply +configuration" type as an argument, which is a structured representation of an Apply request. For +example: + + import ( + ... + v1ac "k8s.io/client-go/applyconfigurations/autoscaling/v1" + ) + hpaApplyConfig := v1ac.HorizontalPodAutoscaler(autoscalerName, ns). + WithSpec(v1ac.HorizontalPodAutoscalerSpec(). + WithMinReplicas(0) + ) + return hpav1client.Apply(ctx, hpaApplyConfig, metav1.ApplyOptions{FieldManager: "mycontroller", Force: true}) + +Note in this example that HorizontalPodAutoscaler is imported from an "applyconfigurations" +package. Each "apply configuration" type represents the same Kubernetes object kind as the +corresponding go struct, but where all fields are pointers to make them optional, allowing apply +requests to be accurately represented. For example, when the apply configuration in the above +example is marshalled to YAML, it produces: + + apiVersion: autoscaling/v1 + kind: HorizontalPodAutoscaler + metadata: + name: myHPA + namespace: myNamespace + spec: + minReplicas: 0 + +To understand why this is needed, the above YAML cannot be produced by the +v1.HorizontalPodAutoscaler go struct.
Take for example: + + hpa := v1.HorizontalPodAutoscaler{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "autoscaling/v1", + Kind: "HorizontalPodAutoscaler", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: autoscalerName, + }, + Spec: v1.HorizontalPodAutoscalerSpec{ + MinReplicas: pointer.Int32Ptr(0), + }, + } + +The above code attempts to declare the same apply configuration as shown in the previous examples, +but when marshalled to YAML, produces: + + kind: HorizontalPodAutoscaler + apiVersion: autoscaling/v1 + metadata: + name: myHPA + namespace: myNamespace + creationTimestamp: null + spec: + scaleTargetRef: + kind: "" + name: "" + minReplicas: 0 + maxReplicas: 0 + +Which, among other things, contains spec.maxReplicas set to 0. This is almost certainly not what +the caller intended (the intended apply configuration says nothing about the maxReplicas field), +and could have serious consequences on a production system: it directs the autoscaler to downscale +to zero pods. The problem here originates from the fact that the go structs contain required fields +that are zero valued if not set explicitly. The go structs work as intended for create and update +operations, but are fundamentally incompatible with apply, which is why we have introduced the +generated "apply configuration" types. + +The "apply configurations" also have convenience With functions that make it easier to +build apply requests. This allows developers to set fields without having to deal with the fact that +all the fields in the "apply configuration" types are pointers, and are inconvenient to set using +go. For example "MinReplicas: &0" is not legal go code, so without the With functions, developers +would work around this problem by using a library, e.g. "MinReplicas: pointer.Int32Ptr(0)", but +string enumerations like corev1.Protocol are still a problem since they cannot be supported by a +general purpose library. In addition to the convenience, the With functions also isolate +developers from the underlying representation, which makes it safer for the underlying +representation to be changed to support additional features in the future. + +# Controller Support + +The new client-go support makes it much easier to use Server-side Apply in controllers, by either of +two mechanisms. + +Mechanism 1: + +When authoring new controllers to use Server-side Apply, a good approach is to have the controller +recreate the apply configuration for an object each time it reconciles that object. This ensures +that the controller fully reconciles all the fields that it is responsible for. Controllers +typically should unconditionally set all the fields they own by setting "Force: true" in the +ApplyOptions. Controllers must also provide a FieldManager name that is unique to the +reconciliation loop that apply is called from. + +When upgrading existing controllers to use Server-side Apply, the same approach often works +well--migrate the controllers to recreate the apply configuration each time it reconciles any +object. For cases where this does not work well, see Mechanism 2. + +Mechanism 2: + +When upgrading existing controllers to use Server-side Apply, the controller might have multiple +code paths that update different parts of an object depending on various conditions. Migrating a +controller like this to Server-side Apply can be risky because if the controller forgets to include +any fields in an apply configuration that were included in a previous apply request, a field can be +accidentally deleted.
For such cases, an alternative to mechanism 1 is to replace any controller +reconciliation code that performs a "read/modify-in-place/update" (or patch) workflow with an +"extract/modify-in-place/apply" workflow. Here's an example of the new workflow: + + fieldMgr := "my-field-manager" + deploymentClient := clientset.AppsV1().Deployments("default") + // read, could also be read from a shared informer + deployment, err := deploymentClient.Get(ctx, "example-deployment", metav1.GetOptions{}) + if err != nil { + // handle error + } + // extract + deploymentApplyConfig, err := appsv1ac.ExtractDeployment(deployment, fieldMgr) + if err != nil { + // handle error + } + // modify-in-place + deploymentApplyConfig.Spec.Template.Spec.WithContainers(corev1ac.Container(). + WithName("modify-slice"). + WithImage("nginx:1.14.2"), + ) + // apply + applied, err := deploymentClient.Apply(ctx, deploymentApplyConfig, metav1.ApplyOptions{FieldManager: fieldMgr}) +*/ +package applyconfigurations // import "k8s.io/client-go/applyconfigurations" diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go index 767e3dfc73..a6e98d1c82 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -49,7 +49,7 @@ type EventApplyConfiguration struct { DeprecatedCount *int32 `json:"deprecatedCount,omitempty"` } -// Event constructs an declarative configuration of the Event type for use with +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -365,3 +365,9 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo b.DeprecatedCount = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go index e66fb41271..18069c0d1b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply.
func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go index cfc4a851f3..890d95748b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -49,7 +49,7 @@ type EventApplyConfiguration struct { DeprecatedCount *int32 `json:"deprecatedCount,omitempty"` } -// Event constructs an declarative configuration of the Event type for use with +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -365,3 +365,9 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo b.DeprecatedCount = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go index 640a265172..75d936e8be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply. func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go index eae399d323..ff778529c9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. 
type DaemonSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct { Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go index bbf718f0f2..9b8057e69d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { Type *v1beta1.DaemonSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go index b5d7a0c161..d628969187 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. 
func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go index be6b3b2853..373f9ef97a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go index 2c827e62d4..e597b15a6a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { Type *v1beta1.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go index 878083f821..6badc64d82 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. 
type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go index d8a214b7fc..79e109a779 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { Type *v1beta1.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go index 5e18476bdc..5531c756f9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -37,7 +37,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. 
func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go index f8d1cf5d25..adc023a34d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go index 7c17b40722..2d88406eb9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go index 361605d8cd..3826e0dddc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. 
type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct { Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go index 3137bc5eb0..1245452237 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go index 46c541048d..6738bf07bf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go index f19c2f2ee2..9d386f1608 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. type IngressBackendApplyConfiguration struct { ServiceName *string `json:"serviceName,omitempty"` @@ -31,7 +31,7 @@ type IngressBackendApplyConfiguration struct { Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go index 20bf637805..12dbc35969 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use // with apply. type IngressLoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct { Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` } -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with // apply. func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { return &IngressLoadBalancerIngressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go index e16dd23633..e896ab3415 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1beta1 -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use // with apply. type IngressLoadBalancerStatusApplyConfiguration struct { Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with // apply. func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { return &IngressLoadBalancerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go index 0836537979..4ee3f01617 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use // with apply. type IngressPortStatusApplyConfiguration struct { Port *int32 `json:"port,omitempty"` @@ -30,7 +30,7 @@ type IngressPortStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with // apply. func IngressPortStatus() *IngressPortStatusApplyConfiguration { return &IngressPortStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go index 015541eeb9..dc676f7b60 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. 
func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go index 2d03c7b132..4a64124755 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go index 1ab4d8bb73..58fbde8b35 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go index faa7e2446f..3aed616889 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. 
type IngressStatusApplyConfiguration struct { LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go index 8ca93a0bc2..63648cd464 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go index a90d3b2207..4a671130b8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IPBlockApplyConfiguration represents an declarative configuration of the IPBlock type for use +// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use // with apply. type IPBlockApplyConfiguration struct { CIDR *string `json:"cidr,omitempty"` Except []string `json:"except,omitempty"` } -// IPBlockApplyConfiguration constructs an declarative configuration of the IPBlock type for use with +// IPBlockApplyConfiguration constructs a declarative configuration of the IPBlock type for use with // apply. func IPBlock() *IPBlockApplyConfiguration { return &IPBlockApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go index 27ea5d9dde..fb1f95a6d6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyApplyConfiguration represents an declarative configuration of the NetworkPolicy type for use +// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use // with apply. 
type NetworkPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type NetworkPolicyApplyConfiguration struct { Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } -// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with +// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with // apply. func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { b := &NetworkPolicyApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NetworkPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go index 6335ec375d..ca3e174f93 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NetworkPolicyEgressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyEgressRule type for use +// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use // with apply. type NetworkPolicyEgressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"` } -// NetworkPolicyEgressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyEgressRule type for use with +// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with // apply. func NetworkPolicyEgressRule() *NetworkPolicyEgressRuleApplyConfiguration { return &NetworkPolicyEgressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go index 2ecc4c8c65..1607137204 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NetworkPolicyIngressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyIngressRule type for use +// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use // with apply. type NetworkPolicyIngressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"` } -// NetworkPolicyIngressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyIngressRule type for use with +// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with // apply. 
func NetworkPolicyIngressRule() *NetworkPolicyIngressRuleApplyConfiguration { return &NetworkPolicyIngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go index c69b281225..8a0fa57415 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyPeerApplyConfiguration represents an declarative configuration of the NetworkPolicyPeer type for use +// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use // with apply. type NetworkPolicyPeerApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -30,7 +30,7 @@ type NetworkPolicyPeerApplyConfiguration struct { IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"` } -// NetworkPolicyPeerApplyConfiguration constructs an declarative configuration of the NetworkPolicyPeer type for use with +// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with // apply. func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration { return &NetworkPolicyPeerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go index 0140d771bf..6bc1c1977b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// NetworkPolicyPortApplyConfiguration represents an declarative configuration of the NetworkPolicyPort type for use +// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use // with apply. type NetworkPolicyPortApplyConfiguration struct { Protocol *v1.Protocol `json:"protocol,omitempty"` @@ -31,7 +31,7 @@ type NetworkPolicyPortApplyConfiguration struct { EndPort *int32 `json:"endPort,omitempty"` } -// NetworkPolicyPortApplyConfiguration constructs an declarative configuration of the NetworkPolicyPort type for use with +// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with // apply. func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration { return &NetworkPolicyPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go index 179e4bd024..4454329c5b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicySpecApplyConfiguration represents an declarative configuration of the NetworkPolicySpec type for use +// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use // with apply. 
type NetworkPolicySpecApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -32,7 +32,7 @@ type NetworkPolicySpecApplyConfiguration struct { PolicyTypes []extensionsv1beta1.PolicyType `json:"policyTypes,omitempty"` } -// NetworkPolicySpecApplyConfiguration constructs an declarative configuration of the NetworkPolicySpec type for use with +// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with // apply. func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration { return &NetworkPolicySpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go index b2afc835d8..24c6b6ad1a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct { Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go index b717365175..21a25ae81f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { Type *v1beta1.ReplicaSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. 
func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go index 5d0c570149..27653dd1af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go index 45dc4bf319..9a5b468a3f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go index 131e57a39d..775f82eef8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// RollbackConfigApplyConfiguration represents an declarative configuration of the RollbackConfig type for use +// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use // with apply. 
type RollbackConfigApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// RollbackConfigApplyConfiguration constructs an declarative configuration of the RollbackConfig type for use with +// RollbackConfigApplyConfiguration constructs a declarative configuration of the RollbackConfig type for use with // apply. func RollbackConfig() *RollbackConfigApplyConfiguration { return &RollbackConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go index 3aa5e2f891..4352f7fac7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go index dde5f064b0..244701a5e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. 
func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go index 60a1a8430c..101aa055b0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -34,7 +34,7 @@ type ScaleApplyConfiguration struct { Status *v1beta1.ScaleStatus `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { b := &ScaleApplyConfiguration{} @@ -216,3 +216,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta1.ScaleStatus) *ScaleAp b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go index cd21214f5a..4e5805f394 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. 
func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go index d9c8a79cc8..0f3b61af97 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/flowcontrol/v1" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { Type *v1.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go index 8809fafbae..9e3978af5b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go index 808ab09a55..5f26a66d2f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { Type *v1.FlowSchemaConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go index 2785f5baf3..4efd5d2875 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go index 7c61360a53..6f951967e8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. 
type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go index 92a03d8628..0be9eddfd6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go index c19f097035..8e27642985 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. 
func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go index 03ff6d9103..454ed8beb4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/flowcontrol/v1" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { Type *v1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go index d9f8c2eccf..29c26b3406 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go index b193efa8bf..088afdc584 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. 
type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go index e8a1b97c9f..bcce2679c6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go index 6ce588c8d9..42ccbfbf9d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { Type *v1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go index 0638aee8b8..f445713f0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go index 5d88749593..2262dedca9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/flowcontrol/v1" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1.PriorityLevelEnablement `json:"type,omitempty"` @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct { Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go index 322871edc6..ff650bc3d5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go index 69fd2c23cc..7488f9bbe2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. 
func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go index 0991ce9445..7428582a82 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go index 55787ca767..58ad10764b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go index f02b03bdc7..1ec77ae89b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/flowcontrol/v1" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. 
type SubjectApplyConfiguration struct { Kind *v1.SubjectKind `json:"kind,omitempty"` @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct { ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go index 2d17c111c6..fd90067d4d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go index 0710480900..45ccc5cb75 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. 
func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go index 6dc1bb4d68..29a8999b8d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go @@ -22,13 +22,13 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { Type *v1beta1.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go index f44313f54e..09bd258905 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go index b62e9a22ff..d1c3dbec6f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { Type *v1beta1.FlowSchemaConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go index 8d72c2d0d7..1d6e8fc58e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go index 6bc6d0543a..5ad8a432b2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. 
type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go index 95b416e426..cc274fe2f3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go index 6f57169e1f..0fe5feca12 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. 
func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go index 86e1bef6b9..66f3276010 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { Type *v1beta1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go index 594ebc9912..3c571ccb06 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go index ea5b266b4c..32a082dc76 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. 
type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go index 84324dbfdc..c4243f874c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go index 59bc610510..1ad4a554b7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { Type *v1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go index c44bcc08b4..b5e773e82a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go index 19146d9f66..b013845f43 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1beta1.PriorityLevelEnablement `json:"type,omitempty"` @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct { Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go index 3c27e6aa62..875b01efec 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go index 5e6e6e7b01..85a8b88630 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. 
func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go index 2e12ee1cc0..5c67dad759 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go index f5a146a9b1..439e5ff753 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go index af571029fc..b5c231f6d2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. 
type SubjectApplyConfiguration struct { Kind *v1beta1.SubjectKind `json:"kind,omitempty"` @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct { ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go index 35bf27a593..bc2deae4c2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go index d6bc330fe7..0c02d9b389 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta2 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. 
func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go index 924f966d48..e3c4b97a7b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go @@ -22,13 +22,13 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { Type *v1beta2.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go index 63a5f0aa30..ffc3af950a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go index 04dfcbf11a..44571d263d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { Type *v1beta2.FlowSchemaConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go index a5477e2768..6eab63bfa9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go index 67c5be2cbe..70ac997e45 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. 
type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go index b670f2cfd9..25207d7c1a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go index 563b185c74..298dd46370 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. 
func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go index a9b7661fb2..38a513d306 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { Type *v1beta2.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go index cb8ba0afd6..5032ee4898 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta2 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go index 179c3979db..2bb8c87182 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. 
type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go index 3256b36300..7d52ca2c2a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go index f742adeff0..ddb17e9843 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { Type *v1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go index 581b451ffd..bbf718b60f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go index 994a8a16a2..c083ad0ba6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go @@ -22,7 +22,7 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1beta2.PriorityLevelEnablement `json:"type,omitempty"` @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct { Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go index b55e32be00..7a1f8790b9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go index 06246fb27e..19c34c5f83 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. 
func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go index b67ea1c7f9..070d2ed465 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go index b6cfdcad3b..c0d44721cc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta2 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go index 7030785b8c..2cfaab43d8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go @@ -22,7 +22,7 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. 
type SubjectApplyConfiguration struct { Kind *v1beta2.SubjectKind `json:"kind,omitempty"` @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct { ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go index 8c77b3e8a2..c249f042da 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go index b03c11d0d9..b9bf6993af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta3 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. 
func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go index cd45725932..49d84bd866 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go @@ -22,13 +22,13 @@ import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { Type *v1beta3.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go index c95635360e..1f69c43b23 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go index 0ef3a2c921..41d623aeb8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { Type *v1beta3.FlowSchemaConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go index e077ed3fde..7141f6a6a1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go index 18db1c9325..294ddc9098 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. 
type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go index b919b711b3..6576e716ef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go index 269a48721c..bd98dd683c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. 
func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go index b7a64ebfee..8deaabdebd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go @@ -22,14 +22,14 @@ import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { Type *v1beta3.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go index ecb47f52cf..2dd0d2b068 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta3 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go index e30aace194..cc64dc585b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. 
type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go index 6fbbbea8fe..e7d1a3a5f8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go index 6e36b6a079..8e9687bb90 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { Type *v1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go index cb827b1e62..566aaa916b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go index 5b0680d912..9fa1112ce6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go @@ -22,7 +22,7 @@ import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1beta3.PriorityLevelEnablement `json:"type,omitempty"` @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct { Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go index 0ee9e306eb..be2436457e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go index fc86c44431..f9a3c6d1a6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. 
func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go index 72623ffe49..e38f711db0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go index e2d6b1b213..a5ed40c2ae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta3 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go index f13b8f3ec5..c412b2a7a2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go @@ -22,7 +22,7 @@ import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. 
type SubjectApplyConfiguration struct { Kind *v1beta3.SubjectKind `json:"kind,omitempty"` @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct { ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go index 3db3abbc1a..7b3ec2ba82 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go new file mode 100644 index 0000000000..91944002d4 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImageReviewApplyConfiguration represents a declarative configuration of the ImageReview type for use +// with apply. +type ImageReviewApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ImageReviewSpecApplyConfiguration `json:"spec,omitempty"` + Status *ImageReviewStatusApplyConfiguration `json:"status,omitempty"` +} + +// ImageReview constructs a declarative configuration of the ImageReview type for use with +// apply. 
+func ImageReview(name string) *ImageReviewApplyConfiguration {
+	b := &ImageReviewApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("ImageReview")
+	b.WithAPIVersion("imagepolicy.k8s.io/v1alpha1")
+	return b
+}
+
+// ExtractImageReview extracts the applied configuration owned by fieldManager from
+// imageReview. If no managedFields are found in imageReview for fieldManager, an
+// ImageReviewApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// imageReview must be an unmodified ImageReview API object that was retrieved from the Kubernetes API.
+// ExtractImageReview provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImageReview(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string) (*ImageReviewApplyConfiguration, error) {
+	return extractImageReview(imageReview, fieldManager, "")
+}
+
+// ExtractImageReviewStatus is the same as ExtractImageReview except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImageReviewStatus(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string) (*ImageReviewApplyConfiguration, error) {
+	return extractImageReview(imageReview, fieldManager, "status")
+}
+
+func extractImageReview(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string, subresource string) (*ImageReviewApplyConfiguration, error) {
+	b := &ImageReviewApplyConfiguration{}
+	err := managedfields.ExtractInto(imageReview, internal.Parser().Type("io.k8s.api.imagepolicy.v1alpha1.ImageReview"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(imageReview.Name)
+
+	b.WithKind("ImageReview")
+	b.WithAPIVersion("imagepolicy.k8s.io/v1alpha1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ImageReviewApplyConfiguration) WithKind(value string) *ImageReviewApplyConfiguration {
+	b.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ImageReviewApplyConfiguration) WithAPIVersion(value string) *ImageReviewApplyConfiguration {
+	b.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ImageReviewApplyConfiguration) WithName(value string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithGenerateName(value string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithNamespace(value string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithUID(value types.UID) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithResourceVersion(value string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithGeneration(value int64) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *ImageReviewApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ImageReviewApplyConfiguration) WithLabels(entries map[string]string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ImageReviewApplyConfiguration) WithAnnotations(entries map[string]string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ImageReviewApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *ImageReviewApplyConfiguration) WithFinalizers(values ...string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ImageReviewApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithSpec(value *ImageReviewSpecApplyConfiguration) *ImageReviewApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithStatus(value *ImageReviewStatusApplyConfiguration) *ImageReviewApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImageReviewApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go new file mode 100644 index 0000000000..adfdb32584 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ImageReviewContainerSpecApplyConfiguration represents a declarative configuration of the ImageReviewContainerSpec type for use +// with apply. +type ImageReviewContainerSpecApplyConfiguration struct { + Image *string `json:"image,omitempty"` +} + +// ImageReviewContainerSpecApplyConfiguration constructs a declarative configuration of the ImageReviewContainerSpec type for use with +// apply. +func ImageReviewContainerSpec() *ImageReviewContainerSpecApplyConfiguration { + return &ImageReviewContainerSpecApplyConfiguration{} +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. 
+func (b *ImageReviewContainerSpecApplyConfiguration) WithImage(value string) *ImageReviewContainerSpecApplyConfiguration { + b.Image = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go new file mode 100644 index 0000000000..7efc36a321 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ImageReviewSpecApplyConfiguration represents a declarative configuration of the ImageReviewSpec type for use +// with apply. +type ImageReviewSpecApplyConfiguration struct { + Containers []ImageReviewContainerSpecApplyConfiguration `json:"containers,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Namespace *string `json:"namespace,omitempty"` +} + +// ImageReviewSpecApplyConfiguration constructs a declarative configuration of the ImageReviewSpec type for use with +// apply. +func ImageReviewSpec() *ImageReviewSpecApplyConfiguration { + return &ImageReviewSpecApplyConfiguration{} +} + +// WithContainers adds the given value to the Containers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Containers field. +func (b *ImageReviewSpecApplyConfiguration) WithContainers(values ...*ImageReviewContainerSpecApplyConfiguration) *ImageReviewSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithContainers") + } + b.Containers = append(b.Containers, *values[i]) + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ImageReviewSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ImageReviewSpecApplyConfiguration { + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ImageReviewSpecApplyConfiguration) WithNamespace(value string) *ImageReviewSpecApplyConfiguration { + b.Namespace = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go new file mode 100644 index 0000000000..e26a427e69 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ImageReviewStatusApplyConfiguration represents a declarative configuration of the ImageReviewStatus type for use +// with apply. +type ImageReviewStatusApplyConfiguration struct { + Allowed *bool `json:"allowed,omitempty"` + Reason *string `json:"reason,omitempty"` + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty"` +} + +// ImageReviewStatusApplyConfiguration constructs a declarative configuration of the ImageReviewStatus type for use with +// apply. +func ImageReviewStatus() *ImageReviewStatusApplyConfiguration { + return &ImageReviewStatusApplyConfiguration{} +} + +// WithAllowed sets the Allowed field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Allowed field is set to the value of the last call. +func (b *ImageReviewStatusApplyConfiguration) WithAllowed(value bool) *ImageReviewStatusApplyConfiguration { + b.Allowed = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *ImageReviewStatusApplyConfiguration) WithReason(value string) *ImageReviewStatusApplyConfiguration { + b.Reason = &value + return b +} + +// WithAuditAnnotations puts the entries into the AuditAnnotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the AuditAnnotations field, +// overwriting an existing map entries in AuditAnnotations field with the same key. 
+func (b *ImageReviewStatusApplyConfiguration) WithAuditAnnotations(entries map[string]string) *ImageReviewStatusApplyConfiguration { + if b.AuditAnnotations == nil && len(entries) > 0 { + b.AuditAnnotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.AuditAnnotations[k] = v + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 47bfb44e0c..43c9ae05a1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -4356,6 +4356,54 @@ var schemaYAML = typed.YAMLObject(`types: - name: leaseTransitions type: scalar: numeric + - name: preferredHolder + type: + scalar: string + - name: renewTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: strategy + type: + scalar: string +- name: io.k8s.api.coordination.v1alpha1.LeaseCandidate + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec + default: {} +- name: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec + map: + fields: + - name: binaryVersion + type: + scalar: string + - name: emulationVersion + type: + scalar: string + - name: leaseName + type: + scalar: string + default: "" + - name: pingTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: preferredStrategies + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: renewTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime @@ -4391,9 +4439,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: leaseTransitions type: scalar: numeric + - name: preferredHolder + type: + scalar: string - name: renewTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: strategy + type: + scalar: string - name: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource map: fields: @@ -4454,6 +4508,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: cachingMode type: scalar: string + default: ReadWrite - name: diskName type: scalar: string @@ -4465,12 +4520,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + default: ext4 - name: kind type: scalar: string + default: Shared - name: readOnly type: scalar: boolean + default: false - name: io.k8s.api.core.v1.AzureFilePersistentVolumeSource map: fields: @@ -4655,15 +4713,6 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.core.v1.ClaimSource - map: - fields: - - name: resourceClaimName - type: - scalar: string - - name: resourceClaimTemplateName - type: - scalar: string - name: io.k8s.api.core.v1.ClientIPConfig map: fields: @@ -5047,6 +5096,14 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: allocatedResourcesStatus + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceStatus + elementRelationship: associative + keys: + - name - name: containerID type: scalar: string @@ -5084,6 +5141,9 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.ContainerState default: {} + - name: user + type: + namedType: io.k8s.api.core.v1.ContainerUser - name: volumeMounts type: list: @@ 
-5092,6 +5152,12 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - mountPath +- name: io.k8s.api.core.v1.ContainerUser + map: + fields: + - name: linux + type: + namedType: io.k8s.api.core.v1.LinuxContainerUser - name: io.k8s.api.core.v1.DaemonEndpoint map: fields: @@ -5661,6 +5727,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string + default: "" - name: io.k8s.api.core.v1.HostPathVolumeSource map: fields: @@ -5693,6 +5760,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: iscsiInterface type: scalar: string + default: default - name: lun type: scalar: numeric @@ -5735,6 +5803,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: iscsiInterface type: scalar: string + default: default - name: lun type: scalar: numeric @@ -5755,6 +5824,15 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.core.v1.ImageVolumeSource + map: + fields: + - name: pullPolicy + type: + scalar: string + - name: reference + type: + scalar: string - name: io.k8s.api.core.v1.KeyToPath map: fields: @@ -5851,6 +5929,23 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.LimitRangeItem elementRelationship: atomic +- name: io.k8s.api.core.v1.LinuxContainerUser + map: + fields: + - name: gid + type: + scalar: numeric + default: 0 + - name: supplementalGroups + type: + list: + elementType: + scalar: numeric + elementRelationship: atomic + - name: uid + type: + scalar: numeric + default: 0 - name: io.k8s.api.core.v1.LoadBalancerIngress map: fields: @@ -6079,6 +6174,12 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.DaemonEndpoint default: {} +- name: io.k8s.api.core.v1.NodeFeatures + map: + fields: + - name: supplementalGroupsPolicy + type: + scalar: boolean - name: io.k8s.api.core.v1.NodeRuntimeHandler map: fields: @@ -6095,6 +6196,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: recursiveReadOnlyMounts type: scalar: boolean + - name: userNamespaces + type: + scalar: boolean - name: io.k8s.api.core.v1.NodeSelector map: fields: @@ -6204,6 +6308,9 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.NodeDaemonEndpoints default: {} + - name: features + type: + namedType: io.k8s.api.core.v1.NodeFeatures - name: images type: list: @@ -6747,6 +6854,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string + default: "" - name: io.k8s.api.core.v1.PodOS map: fields: @@ -6768,10 +6876,12 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" - - name: source + - name: resourceClaimName type: - namedType: io.k8s.api.core.v1.ClaimSource - default: {} + scalar: string + - name: resourceClaimTemplateName + type: + scalar: string - name: io.k8s.api.core.v1.PodResourceClaimStatus map: fields: @@ -6822,6 +6932,9 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: numeric elementRelationship: atomic + - name: supplementalGroupsPolicy + type: + scalar: string - name: sysctls type: list: @@ -7233,6 +7346,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: keyring type: scalar: string + default: /etc/ceph/keyring - name: monitors type: list: @@ -7242,6 +7356,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: pool type: scalar: string + default: rbd - name: readOnly type: scalar: boolean @@ -7251,6 +7366,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: user type: scalar: string + default: admin - name: io.k8s.api.core.v1.RBDVolumeSource map: fields: @@ -7264,6 
+7380,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: keyring type: scalar: string + default: /etc/ceph/keyring - name: monitors type: list: @@ -7273,6 +7390,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: pool type: scalar: string + default: rbd - name: readOnly type: scalar: boolean @@ -7282,6 +7400,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: user type: scalar: string + default: admin - name: io.k8s.api.core.v1.ReplicationController map: fields: @@ -7375,6 +7494,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + - name: request + type: + scalar: string - name: io.k8s.api.core.v1.ResourceFieldSelector map: fields: @@ -7389,6 +7511,16 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string default: "" elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceHealth + map: + fields: + - name: health + type: + scalar: string + - name: resourceID + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.ResourceQuota map: fields: @@ -7461,6 +7593,21 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.core.v1.ResourceStatus + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: resources + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceHealth + elementRelationship: associative + keys: + - resourceID - name: io.k8s.api.core.v1.SELinuxOptions map: fields: @@ -7482,6 +7629,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + default: xfs - name: gateway type: scalar: string @@ -7501,6 +7649,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageMode type: scalar: string + default: ThinProvisioned - name: storagePool type: scalar: string @@ -7517,6 +7666,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + default: xfs - name: gateway type: scalar: string @@ -7536,6 +7686,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageMode type: scalar: string + default: ThinProvisioned - name: storagePool type: scalar: string @@ -8157,6 +8308,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: hostPath type: namedType: io.k8s.api.core.v1.HostPathVolumeSource + - name: image + type: + namedType: io.k8s.api.core.v1.ImageVolumeSource - name: iscsi type: namedType: io.k8s.api.core.v1.ISCSIVolumeSource @@ -10939,6 +11093,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: number type: scalar: numeric + elementRelationship: atomic - name: io.k8s.api.networking.v1alpha1.IPAddress map: fields: @@ -11040,6 +11195,29 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.networking.v1beta1.HTTPIngressPath elementRelationship: atomic +- name: io.k8s.api.networking.v1beta1.IPAddress + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1beta1.IPAddressSpec + default: {} +- name: io.k8s.api.networking.v1beta1.IPAddressSpec + map: + fields: + - name: parentRef + type: + namedType: io.k8s.api.networking.v1beta1.ParentReference - name: io.k8s.api.networking.v1beta1.Ingress map: fields: @@ -11206,6 +11384,62 @@ var schemaYAML = typed.YAMLObject(`types: - name: secretName type: scalar: string +- name: io.k8s.api.networking.v1beta1.ParentReference + map: + fields: + - name: group + type: + scalar: string 
+ - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string +- name: io.k8s.api.networking.v1beta1.ServiceCIDR + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1beta1.ServiceCIDRSpec + default: {} + - name: status + type: + namedType: io.k8s.api.networking.v1beta1.ServiceCIDRStatus + default: {} +- name: io.k8s.api.networking.v1beta1.ServiceCIDRSpec + map: + fields: + - name: cidrs + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.networking.v1beta1.ServiceCIDRStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type - name: io.k8s.api.node.v1.Overhead map: fields: @@ -12010,53 +12244,81 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string -- name: io.k8s.api.resource.v1alpha2.AllocationResult +- name: io.k8s.api.resource.v1alpha3.AllocationResult map: fields: - - name: availableOnNodes + - name: controller + type: + scalar: string + - name: devices + type: + namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationResult + default: {} + - name: nodeSelector type: namedType: io.k8s.api.core.v1.NodeSelector - - name: resourceHandles +- name: io.k8s.api.resource.v1alpha3.BasicDevice + map: + fields: + - name: attributes type: - list: + map: elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceHandle - elementRelationship: atomic - - name: shareable + namedType: io.k8s.api.resource.v1alpha3.DeviceAttribute + - name: capacity type: - scalar: boolean -- name: io.k8s.api.resource.v1alpha2.DriverAllocationResult + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.resource.v1alpha3.CELDeviceSelector map: fields: - - name: namedResources - type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesAllocationResult - - name: vendorRequestParameters + - name: expression type: - namedType: __untyped_atomic_ -- name: io.k8s.api.resource.v1alpha2.DriverRequests + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.Device map: fields: - - name: driverName + - name: basic + type: + namedType: io.k8s.api.resource.v1alpha3.BasicDevice + - name: name type: scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration + map: + fields: + - name: opaque + type: + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration - name: requests type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceRequest + scalar: string elementRelationship: atomic - - name: vendorParameters + - name: source type: - namedType: __untyped_atomic_ -- name: io.k8s.api.resource.v1alpha2.NamedResourcesAllocationResult + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceAllocationResult map: fields: - - name: name + - name: config type: - scalar: string - default: "" -- name: io.k8s.api.resource.v1alpha2.NamedResourcesAttribute + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration + elementRelationship: atomic + - name: results + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult + elementRelationship: atomic 
+- name: io.k8s.api.resource.v1alpha3.DeviceAttribute map: fields: - name: bool @@ -12065,80 +12327,161 @@ var schemaYAML = typed.YAMLObject(`types: - name: int type: scalar: numeric - - name: intSlice - type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesIntSlice - - name: name - type: - scalar: string - default: "" - - name: quantity - type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: string type: scalar: string - - name: stringSlice - type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesStringSlice - name: version type: scalar: string -- name: io.k8s.api.resource.v1alpha2.NamedResourcesFilter +- name: io.k8s.api.resource.v1alpha3.DeviceClaim map: fields: - - name: selector + - name: config type: - scalar: string - default: "" -- name: io.k8s.api.resource.v1alpha2.NamedResourcesInstance - map: - fields: - - name: attributes + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration + elementRelationship: atomic + - name: constraints type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesAttribute + namedType: io.k8s.api.resource.v1alpha3.DeviceConstraint elementRelationship: atomic - - name: name + - name: requests type: - scalar: string - default: "" -- name: io.k8s.api.resource.v1alpha2.NamedResourcesIntSlice + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceRequest + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration map: fields: - - name: ints + - name: opaque + type: + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration + - name: requests type: list: elementType: - scalar: numeric + scalar: string elementRelationship: atomic -- name: io.k8s.api.resource.v1alpha2.NamedResourcesRequest +- name: io.k8s.api.resource.v1alpha3.DeviceClass map: fields: - - name: selector + - name: apiVersion type: scalar: string - default: "" -- name: io.k8s.api.resource.v1alpha2.NamedResourcesResources + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha3.DeviceClassSpec + default: {} +- name: io.k8s.api.resource.v1alpha3.DeviceClassConfiguration + map: + fields: + - name: opaque + type: + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration +- name: io.k8s.api.resource.v1alpha3.DeviceClassSpec map: fields: - - name: instances + - name: config + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceClassConfiguration + elementRelationship: atomic + - name: selectors type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesInstance + namedType: io.k8s.api.resource.v1alpha3.DeviceSelector elementRelationship: atomic -- name: io.k8s.api.resource.v1alpha2.NamedResourcesStringSlice + - name: suitableNodes + type: + namedType: io.k8s.api.core.v1.NodeSelector +- name: io.k8s.api.resource.v1alpha3.DeviceConstraint map: fields: - - name: strings + - name: matchAttribute + type: + scalar: string + - name: requests type: list: elementType: scalar: string elementRelationship: atomic -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContext +- name: io.k8s.api.resource.v1alpha3.DeviceRequest + map: + fields: + - name: adminAccess + type: + scalar: boolean + default: false + - name: allocationMode + type: + scalar: string + - name: count + type: + scalar: numeric + - name: deviceClassName + type: + scalar: string + default: "" + - name: 
name + type: + scalar: string + default: "" + - name: selectors + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceSelector + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult + map: + fields: + - name: device + type: + scalar: string + default: "" + - name: driver + type: + scalar: string + default: "" + - name: pool + type: + scalar: string + default: "" + - name: request + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceSelector + map: + fields: + - name: cel + type: + namedType: io.k8s.api.resource.v1alpha3.CELDeviceSelector +- name: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration + map: + fields: + - name: driver + type: + scalar: string + default: "" + - name: parameters + type: + namedType: __untyped_atomic_ +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContext map: fields: - name: apiVersion @@ -12153,13 +12496,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec + namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec default: {} - name: status type: - namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus + namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus default: {} -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec map: fields: - name: potentialNodes @@ -12171,18 +12514,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: selectedNode type: scalar: string -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus map: fields: - name: resourceClaims type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus elementRelationship: associative keys: - name -- name: io.k8s.api.resource.v1alpha2.ResourceClaim +- name: io.k8s.api.resource.v1alpha3.ResourceClaim map: fields: - name: apiVersion @@ -12197,13 +12540,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSpec default: {} - name: status type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimStatus + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimStatus default: {} -- name: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference +- name: io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference map: fields: - name: apiGroup @@ -12221,91 +12564,47 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha2.ResourceClaimParameters +- name: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus map: fields: - - name: apiVersion - type: - scalar: string - - name: driverRequests - type: - list: - elementType: - namedType: io.k8s.api.resource.v1alpha2.DriverRequests - elementRelationship: atomic - - name: generatedFrom - type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: shareable - type: - scalar: boolean -- name: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference - map: - fields: - - name: apiGroup - type: - scalar: string - - 
name: kind - type: - scalar: string - default: "" - name: name type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus - map: - fields: - - name: name - type: - scalar: string - name: unsuitableNodes type: list: elementType: scalar: string elementRelationship: atomic -- name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec +- name: io.k8s.api.resource.v1alpha3.ResourceClaimSpec map: fields: - - name: allocationMode + - name: controller type: scalar: string - - name: parametersRef - type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference - - name: resourceClassName + - name: devices type: - scalar: string - default: "" -- name: io.k8s.api.resource.v1alpha2.ResourceClaimStatus + namedType: io.k8s.api.resource.v1alpha3.DeviceClaim + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceClaimStatus map: fields: - name: allocation type: - namedType: io.k8s.api.resource.v1alpha2.AllocationResult + namedType: io.k8s.api.resource.v1alpha3.AllocationResult - name: deallocationRequested type: scalar: boolean - - name: driverName - type: - scalar: string - name: reservedFor type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference elementRelationship: associative keys: - uid -- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplate +- name: io.k8s.api.resource.v1alpha3.ResourceClaimTemplate map: fields: - name: apiVersion @@ -12320,9 +12619,9 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec default: {} -- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec +- name: io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec map: fields: - name: metadata @@ -12331,119 +12630,29 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec - default: {} -- name: io.k8s.api.resource.v1alpha2.ResourceClass - map: - fields: - - name: apiVersion - type: - scalar: string - - name: driverName - type: - scalar: string - default: "" - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: parametersRef - type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference - - name: structuredParameters - type: - scalar: boolean - - name: suitableNodes - type: - namedType: io.k8s.api.core.v1.NodeSelector -- name: io.k8s.api.resource.v1alpha2.ResourceClassParameters - map: - fields: - - name: apiVersion - type: - scalar: string - - name: filters - type: - list: - elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceFilter - elementRelationship: atomic - - name: generatedFrom - type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSpec default: {} - - name: vendorParameters - type: - list: - elementType: - namedType: io.k8s.api.resource.v1alpha2.VendorParameters - elementRelationship: atomic -- name: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference +- name: io.k8s.api.resource.v1alpha3.ResourcePool map: fields: - - name: apiGroup - type: - scalar: string - - 
name: kind + - name: generation type: - scalar: string - default: "" + scalar: numeric + default: 0 - name: name type: scalar: string default: "" - - name: namespace - type: - scalar: string -- name: io.k8s.api.resource.v1alpha2.ResourceFilter - map: - fields: - - name: driverName + - name: resourceSliceCount type: - scalar: string - - name: namedResources - type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesFilter -- name: io.k8s.api.resource.v1alpha2.ResourceHandle - map: - fields: - - name: data - type: - scalar: string - - name: driverName - type: - scalar: string - - name: structuredData - type: - namedType: io.k8s.api.resource.v1alpha2.StructuredResourceHandle -- name: io.k8s.api.resource.v1alpha2.ResourceRequest - map: - fields: - - name: namedResources - type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesRequest - - name: vendorParameters - type: - namedType: __untyped_atomic_ -- name: io.k8s.api.resource.v1alpha2.ResourceSlice + scalar: numeric + default: 0 +- name: io.k8s.api.resource.v1alpha3.ResourceSlice map: fields: - name: apiVersion type: scalar: string - - name: driverName - type: - scalar: string - default: "" - name: kind type: scalar: string @@ -12451,39 +12660,36 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: namedResources - type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesResources - - name: nodeName + - name: spec type: - scalar: string -- name: io.k8s.api.resource.v1alpha2.StructuredResourceHandle + namedType: io.k8s.api.resource.v1alpha3.ResourceSliceSpec + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceSliceSpec map: fields: - - name: nodeName + - name: allNodes type: - scalar: string - - name: results + scalar: boolean + - name: devices type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.DriverAllocationResult + namedType: io.k8s.api.resource.v1alpha3.Device elementRelationship: atomic - - name: vendorClaimParameters - type: - namedType: __untyped_atomic_ - - name: vendorClassParameters + - name: driver type: - namedType: __untyped_atomic_ -- name: io.k8s.api.resource.v1alpha2.VendorParameters - map: - fields: - - name: driverName + scalar: string + default: "" + - name: nodeName type: scalar: string - - name: parameters + - name: nodeSelector type: - namedType: __untyped_atomic_ + namedType: io.k8s.api.core.v1.NodeSelector + - name: pool + type: + namedType: io.k8s.api.resource.v1alpha3.ResourcePool + default: {} - name: io.k8s.api.scheduling.v1.PriorityClass map: fields: @@ -13177,6 +13383,28 @@ var schemaYAML = typed.YAMLObject(`types: - name: detachError type: namedType: io.k8s.api.storage.v1beta1.VolumeError +- name: io.k8s.api.storage.v1beta1.VolumeAttributesClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: driverName + type: + scalar: string + default: "" + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: parameters + type: + map: + elementType: + scalar: string - name: io.k8s.api.storage.v1beta1.VolumeError map: fields: diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go index c84102cdde..466aaebb61 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go @@ -22,7 +22,7 @@ import ( v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ConditionApplyConfiguration represents an declarative configuration of the Condition type for use +// ConditionApplyConfiguration represents a declarative configuration of the Condition type for use // with apply. type ConditionApplyConfiguration struct { Type *string `json:"type,omitempty"` @@ -33,7 +33,7 @@ type ConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ConditionApplyConfiguration constructs an declarative configuration of the Condition type for use with +// ConditionApplyConfiguration constructs a declarative configuration of the Condition type for use with // apply. func Condition() *ConditionApplyConfiguration { return &ConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go index 7a1d23114d..313bb9784d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeleteOptionsApplyConfiguration represents an declarative configuration of the DeleteOptions type for use +// DeleteOptionsApplyConfiguration represents a declarative configuration of the DeleteOptions type for use // with apply. type DeleteOptionsApplyConfiguration struct { TypeMetaApplyConfiguration `json:",inline"` @@ -33,7 +33,7 @@ type DeleteOptionsApplyConfiguration struct { DryRun []string `json:"dryRun,omitempty"` } -// DeleteOptionsApplyConfiguration constructs an declarative configuration of the DeleteOptions type for use with +// DeleteOptionsApplyConfiguration constructs a declarative configuration of the DeleteOptions type for use with // apply. func DeleteOptions() *DeleteOptionsApplyConfiguration { b := &DeleteOptionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go index 6d24bc363b..1f33c94e0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// LabelSelectorApplyConfiguration represents an declarative configuration of the LabelSelector type for use +// LabelSelectorApplyConfiguration represents a declarative configuration of the LabelSelector type for use // with apply. type LabelSelectorApplyConfiguration struct { MatchLabels map[string]string `json:"matchLabels,omitempty"` MatchExpressions []LabelSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` } -// LabelSelectorApplyConfiguration constructs an declarative configuration of the LabelSelector type for use with +// LabelSelectorApplyConfiguration constructs a declarative configuration of the LabelSelector type for use with // apply. 
func LabelSelector() *LabelSelectorApplyConfiguration { return &LabelSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go index ff70f365e6..bd9db9659b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LabelSelectorRequirementApplyConfiguration represents an declarative configuration of the LabelSelectorRequirement type for use +// LabelSelectorRequirementApplyConfiguration represents a declarative configuration of the LabelSelectorRequirement type for use // with apply. type LabelSelectorRequirementApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -30,7 +30,7 @@ type LabelSelectorRequirementApplyConfiguration struct { Values []string `json:"values,omitempty"` } -// LabelSelectorRequirementApplyConfiguration constructs an declarative configuration of the LabelSelectorRequirement type for use with +// LabelSelectorRequirementApplyConfiguration constructs a declarative configuration of the LabelSelectorRequirement type for use with // apply. func LabelSelectorRequirement() *LabelSelectorRequirementApplyConfiguration { return &LabelSelectorRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go index f4d7e26812..6913df8226 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ManagedFieldsEntryApplyConfiguration represents an declarative configuration of the ManagedFieldsEntry type for use +// ManagedFieldsEntryApplyConfiguration represents a declarative configuration of the ManagedFieldsEntry type for use // with apply. type ManagedFieldsEntryApplyConfiguration struct { Manager *string `json:"manager,omitempty"` @@ -34,7 +34,7 @@ type ManagedFieldsEntryApplyConfiguration struct { Subresource *string `json:"subresource,omitempty"` } -// ManagedFieldsEntryApplyConfiguration constructs an declarative configuration of the ManagedFieldsEntry type for use with +// ManagedFieldsEntryApplyConfiguration constructs a declarative configuration of the ManagedFieldsEntry type for use with // apply. func ManagedFieldsEntry() *ManagedFieldsEntryApplyConfiguration { return &ManagedFieldsEntryApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go index 9b290e9680..a9419975ef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go @@ -23,7 +23,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// ObjectMetaApplyConfiguration represents an declarative configuration of the ObjectMeta type for use +// ObjectMetaApplyConfiguration represents a declarative configuration of the ObjectMeta type for use // with apply. 
type ObjectMetaApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -41,7 +41,7 @@ type ObjectMetaApplyConfiguration struct { Finalizers []string `json:"finalizers,omitempty"` } -// ObjectMetaApplyConfiguration constructs an declarative configuration of the ObjectMeta type for use with +// ObjectMetaApplyConfiguration constructs a declarative configuration of the ObjectMeta type for use with // apply. func ObjectMeta() *ObjectMetaApplyConfiguration { return &ObjectMetaApplyConfiguration{} @@ -169,3 +169,8 @@ func (b *ObjectMetaApplyConfiguration) WithFinalizers(values ...string) *ObjectM } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ObjectMetaApplyConfiguration) GetName() *string { + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go index b3117d6a4b..2776152322 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// OwnerReferenceApplyConfiguration represents an declarative configuration of the OwnerReference type for use +// OwnerReferenceApplyConfiguration represents a declarative configuration of the OwnerReference type for use // with apply. type OwnerReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` @@ -33,7 +33,7 @@ type OwnerReferenceApplyConfiguration struct { BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty"` } -// OwnerReferenceApplyConfiguration constructs an declarative configuration of the OwnerReference type for use with +// OwnerReferenceApplyConfiguration constructs a declarative configuration of the OwnerReference type for use with // apply. func OwnerReference() *OwnerReferenceApplyConfiguration { return &OwnerReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go index f627733f1e..8f8b6c6b3a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go @@ -22,14 +22,14 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// PreconditionsApplyConfiguration represents an declarative configuration of the Preconditions type for use +// PreconditionsApplyConfiguration represents a declarative configuration of the Preconditions type for use // with apply. type PreconditionsApplyConfiguration struct { UID *types.UID `json:"uid,omitempty"` ResourceVersion *string `json:"resourceVersion,omitempty"` } -// PreconditionsApplyConfiguration constructs an declarative configuration of the Preconditions type for use with +// PreconditionsApplyConfiguration constructs a declarative configuration of the Preconditions type for use with // apply. func Preconditions() *PreconditionsApplyConfiguration { return &PreconditionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go index 877b0890e8..979044384c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// TypeMetaApplyConfiguration represents an declarative configuration of the TypeMeta type for use +// TypeMetaApplyConfiguration represents a declarative configuration of the TypeMeta type for use // with apply. type TypeMetaApplyConfiguration struct { Kind *string `json:"kind,omitempty"` APIVersion *string `json:"apiVersion,omitempty"` } -// TypeMetaApplyConfiguration constructs an declarative configuration of the TypeMeta type for use with +// TypeMetaApplyConfiguration constructs a declarative configuration of the TypeMeta type for use with // apply. func TypeMeta() *TypeMetaApplyConfiguration { return &TypeMetaApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go index 07b6a67f6a..e39670f295 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/networking/v1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct { Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go index fef529d696..ad9a7a6771 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. 
func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go index b5146902d4..607c26e943 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go index 5757135991..b014b7beef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go @@ -22,14 +22,14 @@ import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. type IngressBackendApplyConfiguration struct { Service *IngressServiceBackendApplyConfiguration `json:"service,omitempty"` Resource *corev1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go index e33d0b2d9f..14acc7dbd8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressClassApplyConfiguration represents an declarative configuration of the IngressClass type for use +// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use // with apply. 
type IngressClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type IngressClassApplyConfiguration struct { Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"` } -// IngressClass constructs an declarative configuration of the IngressClass type for use with +// IngressClass constructs a declarative configuration of the IngressClass type for use with // apply. func IngressClass(name string) *IngressClassApplyConfiguration { b := &IngressClassApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go index a020d3a8df..0dba1ebc5d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// IngressClassParametersReferenceApplyConfiguration represents an declarative configuration of the IngressClassParametersReference type for use +// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use // with apply. type IngressClassParametersReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -28,7 +28,7 @@ type IngressClassParametersReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// IngressClassParametersReferenceApplyConfiguration constructs an declarative configuration of the IngressClassParametersReference type for use with +// IngressClassParametersReferenceApplyConfiguration constructs a declarative configuration of the IngressClassParametersReference type for use with // apply. func IngressClassParametersReference() *IngressClassParametersReferenceApplyConfiguration { return &IngressClassParametersReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go index ec0423e708..23e8484344 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressClassSpecApplyConfiguration represents an declarative configuration of the IngressClassSpec type for use +// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use // with apply. type IngressClassSpecApplyConfiguration struct { Controller *string `json:"controller,omitempty"` Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"` } -// IngressClassSpecApplyConfiguration constructs an declarative configuration of the IngressClassSpec type for use with +// IngressClassSpecApplyConfiguration constructs a declarative configuration of the IngressClassSpec type for use with // apply. 
func IngressClassSpec() *IngressClassSpecApplyConfiguration { return &IngressClassSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go index 444275a127..d0feb44da4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use // with apply. type IngressLoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct { Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` } -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with // apply. func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { return &IngressLoadBalancerIngressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go index 8e01a301ac..08c841f06b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use // with apply. type IngressLoadBalancerStatusApplyConfiguration struct { Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with // apply. func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { return &IngressLoadBalancerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go index 82b5babd9c..b6411199fc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use // with apply. 
type IngressPortStatusApplyConfiguration struct { Port *int32 `json:"port,omitempty"` @@ -30,7 +30,7 @@ type IngressPortStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with // apply. func IngressPortStatus() *IngressPortStatusApplyConfiguration { return &IngressPortStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go index 8153e88fe2..4ef871f077 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go index d0e094387c..1e13e378be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go index 399739631b..07876afd17 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// IngressServiceBackendApplyConfiguration represents an declarative configuration of the IngressServiceBackend type for use +// IngressServiceBackendApplyConfiguration represents a declarative configuration of the IngressServiceBackend type for use // with apply. type IngressServiceBackendApplyConfiguration struct { Name *string `json:"name,omitempty"` Port *ServiceBackendPortApplyConfiguration `json:"port,omitempty"` } -// IngressServiceBackendApplyConfiguration constructs an declarative configuration of the IngressServiceBackend type for use with +// IngressServiceBackendApplyConfiguration constructs a declarative configuration of the IngressServiceBackend type for use with // apply. func IngressServiceBackend() *IngressServiceBackendApplyConfiguration { return &IngressServiceBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go index 635514ecf7..0572153aa1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go index 7131bf8d0d..bd1327c93f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. 
func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go index 4d8d369f7c..44092503f9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go index 1efd6edfdc..f3447a8f10 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IPBlockApplyConfiguration represents an declarative configuration of the IPBlock type for use +// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use // with apply. type IPBlockApplyConfiguration struct { CIDR *string `json:"cidr,omitempty"` Except []string `json:"except,omitempty"` } -// IPBlockApplyConfiguration constructs an declarative configuration of the IPBlock type for use with +// IPBlockApplyConfiguration constructs a declarative configuration of the IPBlock type for use with // apply. func IPBlock() *IPBlockApplyConfiguration { return &IPBlockApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go index 409507310b..3f8c8a5351 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyApplyConfiguration represents an declarative configuration of the NetworkPolicy type for use +// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use // with apply. type NetworkPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type NetworkPolicyApplyConfiguration struct { Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } -// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with +// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with // apply. 
func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { b := &NetworkPolicyApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NetworkPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go index e5751c4413..46e2706ece 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NetworkPolicyEgressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyEgressRule type for use +// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use // with apply. type NetworkPolicyEgressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"` } -// NetworkPolicyEgressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyEgressRule type for use with +// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with // apply. func NetworkPolicyEgressRule() *NetworkPolicyEgressRuleApplyConfiguration { return &NetworkPolicyEgressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go index 630fe1fabe..6e98759786 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NetworkPolicyIngressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyIngressRule type for use +// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use // with apply. type NetworkPolicyIngressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"` } -// NetworkPolicyIngressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyIngressRule type for use with +// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with // apply. 
func NetworkPolicyIngressRule() *NetworkPolicyIngressRuleApplyConfiguration { return &NetworkPolicyIngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go index 909b651c04..046de3e237 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyPeerApplyConfiguration represents an declarative configuration of the NetworkPolicyPeer type for use +// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use // with apply. type NetworkPolicyPeerApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -30,7 +30,7 @@ type NetworkPolicyPeerApplyConfiguration struct { IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"` } -// NetworkPolicyPeerApplyConfiguration constructs an declarative configuration of the NetworkPolicyPeer type for use with +// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with // apply. func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration { return &NetworkPolicyPeerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go index 73dbed1d89..581ef1c348 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// NetworkPolicyPortApplyConfiguration represents an declarative configuration of the NetworkPolicyPort type for use +// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use // with apply. type NetworkPolicyPortApplyConfiguration struct { Protocol *v1.Protocol `json:"protocol,omitempty"` @@ -31,7 +31,7 @@ type NetworkPolicyPortApplyConfiguration struct { EndPort *int32 `json:"endPort,omitempty"` } -// NetworkPolicyPortApplyConfiguration constructs an declarative configuration of the NetworkPolicyPort type for use with +// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with // apply. func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration { return &NetworkPolicyPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go index 882d8233a9..da5ed5d358 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicySpecApplyConfiguration represents an declarative configuration of the NetworkPolicySpec type for use +// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use // with apply. 
type NetworkPolicySpecApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -32,7 +32,7 @@ type NetworkPolicySpecApplyConfiguration struct { PolicyTypes []apinetworkingv1.PolicyType `json:"policyTypes,omitempty"` } -// NetworkPolicySpecApplyConfiguration constructs an declarative configuration of the NetworkPolicySpec type for use with +// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with // apply. func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration { return &NetworkPolicySpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go index ec278960ca..517f974838 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ServiceBackendPortApplyConfiguration represents an declarative configuration of the ServiceBackendPort type for use +// ServiceBackendPortApplyConfiguration represents a declarative configuration of the ServiceBackendPort type for use // with apply. type ServiceBackendPortApplyConfiguration struct { Name *string `json:"name,omitempty"` Number *int32 `json:"number,omitempty"` } -// ServiceBackendPortApplyConfiguration constructs an declarative configuration of the ServiceBackendPort type for use with +// ServiceBackendPortApplyConfiguration constructs a declarative configuration of the ServiceBackendPort type for use with // apply. func ServiceBackendPort() *ServiceBackendPortApplyConfiguration { return &ServiceBackendPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go index da6822111d..999c23fa14 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IPAddressApplyConfiguration represents an declarative configuration of the IPAddress type for use +// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use // with apply. type IPAddressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type IPAddressApplyConfiguration struct { Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` } -// IPAddress constructs an declarative configuration of the IPAddress type for use with +// IPAddress constructs a declarative configuration of the IPAddress type for use with // apply. func IPAddress(name string) *IPAddressApplyConfiguration { b := &IPAddressApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IPAddressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go index 064963d691..bf025a8c1a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go @@ -18,13 +18,13 @@ limitations under the License. package v1alpha1 -// IPAddressSpecApplyConfiguration represents an declarative configuration of the IPAddressSpec type for use +// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use // with apply. type IPAddressSpecApplyConfiguration struct { ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` } -// IPAddressSpecApplyConfiguration constructs an declarative configuration of the IPAddressSpec type for use with +// IPAddressSpecApplyConfiguration constructs a declarative configuration of the IPAddressSpec type for use with // apply. func IPAddressSpec() *IPAddressSpecApplyConfiguration { return &IPAddressSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go index ce1049709a..d5a52d503d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use +// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use // with apply. type ParentReferenceApplyConfiguration struct { Group *string `json:"group,omitempty"` @@ -27,7 +27,7 @@ type ParentReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with +// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with // apply. func ParentReference() *ParentReferenceApplyConfiguration { return &ParentReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go index f6d0a91e00..984e049f28 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceCIDRApplyConfiguration represents an declarative configuration of the ServiceCIDR type for use +// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use // with apply. 
type ServiceCIDRApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ServiceCIDRApplyConfiguration struct { Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"` } -// ServiceCIDR constructs an declarative configuration of the ServiceCIDR type for use with +// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with // apply. func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration { b := &ServiceCIDRApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceCIDRApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go index 302d69194c..7875ff403b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go @@ -18,13 +18,13 @@ limitations under the License. package v1alpha1 -// ServiceCIDRSpecApplyConfiguration represents an declarative configuration of the ServiceCIDRSpec type for use +// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use // with apply. type ServiceCIDRSpecApplyConfiguration struct { CIDRs []string `json:"cidrs,omitempty"` } -// ServiceCIDRSpecApplyConfiguration constructs an declarative configuration of the ServiceCIDRSpec type for use with +// ServiceCIDRSpecApplyConfiguration constructs a declarative configuration of the ServiceCIDRSpec type for use with // apply. func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration { return &ServiceCIDRSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go index 5afc549a65..34715e3a49 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceCIDRStatusApplyConfiguration represents an declarative configuration of the ServiceCIDRStatus type for use +// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use // with apply. type ServiceCIDRStatusApplyConfiguration struct { Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ServiceCIDRStatusApplyConfiguration constructs an declarative configuration of the ServiceCIDRStatus type for use with +// ServiceCIDRStatusApplyConfiguration constructs a declarative configuration of the ServiceCIDRStatus type for use with // apply. 
func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration { return &ServiceCIDRStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go index b12907e81c..61b458f7ee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/networking/v1beta1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct { Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go index 3137bc5eb0..1245452237 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go index 56f65c30a9..0df53ea652 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. 
type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go index f19c2f2ee2..9d386f1608 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. type IngressBackendApplyConfiguration struct { ServiceName *string `json:"serviceName,omitempty"` @@ -31,7 +31,7 @@ type IngressBackendApplyConfiguration struct { Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go index b65d4b3073..b0e877b57a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressClassApplyConfiguration represents an declarative configuration of the IngressClass type for use +// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use // with apply. type IngressClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type IngressClassApplyConfiguration struct { Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"` } -// IngressClass constructs an declarative configuration of the IngressClass type for use with +// IngressClass constructs a declarative configuration of the IngressClass type for use with // apply. func IngressClass(name string) *IngressClassApplyConfiguration { b := &IngressClassApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IngressClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go index e6ca805e47..2a307a6760 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressClassParametersReferenceApplyConfiguration represents an declarative configuration of the IngressClassParametersReference type for use +// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use // with apply. type IngressClassParametersReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -28,7 +28,7 @@ type IngressClassParametersReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// IngressClassParametersReferenceApplyConfiguration constructs an declarative configuration of the IngressClassParametersReference type for use with +// IngressClassParametersReferenceApplyConfiguration constructs a declarative configuration of the IngressClassParametersReference type for use with // apply. func IngressClassParametersReference() *IngressClassParametersReferenceApplyConfiguration { return &IngressClassParametersReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go index 51040462ca..eefbf62b87 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressClassSpecApplyConfiguration represents an declarative configuration of the IngressClassSpec type for use +// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use // with apply. type IngressClassSpecApplyConfiguration struct { Controller *string `json:"controller,omitempty"` Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"` } -// IngressClassSpecApplyConfiguration constructs an declarative configuration of the IngressClassSpec type for use with +// IngressClassSpecApplyConfiguration constructs a declarative configuration of the IngressClassSpec type for use with // apply. func IngressClassSpec() *IngressClassSpecApplyConfiguration { return &IngressClassSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go index 20bf637805..12dbc35969 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use // with apply. type IngressLoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct { Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` } -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with // apply. func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { return &IngressLoadBalancerIngressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go index e16dd23633..e896ab3415 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use // with apply. type IngressLoadBalancerStatusApplyConfiguration struct { Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with // apply. func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { return &IngressLoadBalancerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go index 0836537979..4ee3f01617 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use // with apply. type IngressPortStatusApplyConfiguration struct { Port *int32 `json:"port,omitempty"` @@ -30,7 +30,7 @@ type IngressPortStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with // apply. 
func IngressPortStatus() *IngressPortStatusApplyConfiguration { return &IngressPortStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go index 015541eeb9..dc676f7b60 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go index 2d03c7b132..4a64124755 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go index 1ab4d8bb73..58fbde8b35 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. 
type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go index faa7e2446f..3aed616889 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go index 8ca93a0bc2..63648cd464 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go new file mode 100644 index 0000000000..3047d79b95 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + networkingv1beta1 "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use +// with apply. +type IPAddressApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` +} + +// IPAddress constructs a declarative configuration of the IPAddress type for use with +// apply. +func IPAddress(name string) *IPAddressApplyConfiguration { + b := &IPAddressApplyConfiguration{} + b.WithName(name) + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b +} + +// ExtractIPAddress extracts the applied configuration owned by fieldManager from +// iPAddress. If no managedFields are found in iPAddress for fieldManager, a +// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// iPAddress must be a unmodified IPAddress API object that was retrieved from the Kubernetes API. +// ExtractIPAddress provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "") +} + +// ExtractIPAddressStatus is the same as ExtractIPAddress except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractIPAddressStatus(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "status") +} + +func extractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) { + b := &IPAddressApplyConfiguration{} + err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1beta1.IPAddress"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(iPAddress.Name) + + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *IPAddressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfiguration) *IPAddressApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IPAddressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go new file mode 100644 index 0000000000..76b02137d2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use +// with apply. 
+type IPAddressSpecApplyConfiguration struct { + ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` +} + +// IPAddressSpecApplyConfiguration constructs a declarative configuration of the IPAddressSpec type for use with +// apply. +func IPAddressSpec() *IPAddressSpecApplyConfiguration { + return &IPAddressSpecApplyConfiguration{} +} + +// WithParentRef sets the ParentRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParentRef field is set to the value of the last call. +func (b *IPAddressSpecApplyConfiguration) WithParentRef(value *ParentReferenceApplyConfiguration) *IPAddressSpecApplyConfiguration { + b.ParentRef = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go similarity index 51% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go index d67e4d3977..1863938f16 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go @@ -16,51 +16,51 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1beta1 -// ResourceClassParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClassParametersReference type for use +// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use // with apply. -type ResourceClassParametersReferenceApplyConfiguration struct { - APIGroup *string `json:"apiGroup,omitempty"` - Kind *string `json:"kind,omitempty"` - Name *string `json:"name,omitempty"` +type ParentReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` } -// ResourceClassParametersReferenceApplyConfiguration constructs an declarative configuration of the ResourceClassParametersReference type for use with +// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with // apply. -func ResourceClassParametersReference() *ResourceClassParametersReferenceApplyConfiguration { - return &ResourceClassParametersReferenceApplyConfiguration{} +func ParentReference() *ParentReferenceApplyConfiguration { + return &ParentReferenceApplyConfiguration{} } -// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// WithGroup sets the Group field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIGroup field is set to the value of the last call. -func (b *ResourceClassParametersReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.APIGroup = &value +// If called multiple times, the Group field is set to the value of the last call. 
+func (b *ParentReferenceApplyConfiguration) WithGroup(value string) *ParentReferenceApplyConfiguration { + b.Group = &value return b } -// WithKind sets the Kind field in the declarative configuration to the given value +// WithResource sets the Resource field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClassParametersReferenceApplyConfiguration) WithKind(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.Kind = &value +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithResource(value string) *ParentReferenceApplyConfiguration { + b.Resource = &value return b } -// WithName sets the Name field in the declarative configuration to the given value +// WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClassParametersReferenceApplyConfiguration) WithName(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.Name = &value +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithNamespace(value string) *ParentReferenceApplyConfiguration { + b.Namespace = &value return b } -// WithNamespace sets the Namespace field in the declarative configuration to the given value +// WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClassParametersReferenceApplyConfiguration) WithNamespace(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.Namespace = &value +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentReferenceApplyConfiguration { + b.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go new file mode 100644 index 0000000000..4ef8e9ecac --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + networkingv1beta1 "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use +// with apply. +type ServiceCIDRApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"` +} + +// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with +// apply. +func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration { + b := &ServiceCIDRApplyConfiguration{} + b.WithName(name) + b.WithKind("ServiceCIDR") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b +} + +// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from +// serviceCIDR. If no managedFields are found in serviceCIDR for fieldManager, a +// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// serviceCIDR must be a unmodified ServiceCIDR API object that was retrieved from the Kubernetes API. +// ExtractServiceCIDR provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "") +} + +// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractServiceCIDRStatus(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "status") +} + +func extractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) { + b := &ServiceCIDRApplyConfiguration{} + err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1beta1.ServiceCIDR"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(serviceCIDR.Name) + + b.WithKind("ServiceCIDR") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ServiceCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithSpec(value *ServiceCIDRSpecApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceCIDRApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesintslice.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go similarity index 52% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesintslice.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go index ea00bffe51..1f283532d3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesintslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go @@ -16,26 +16,26 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1beta1 -// NamedResourcesIntSliceApplyConfiguration represents an declarative configuration of the NamedResourcesIntSlice type for use +// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use // with apply. 
-type NamedResourcesIntSliceApplyConfiguration struct { - Ints []int64 `json:"ints,omitempty"` +type ServiceCIDRSpecApplyConfiguration struct { + CIDRs []string `json:"cidrs,omitempty"` } -// NamedResourcesIntSliceApplyConfiguration constructs an declarative configuration of the NamedResourcesIntSlice type for use with +// ServiceCIDRSpecApplyConfiguration constructs a declarative configuration of the ServiceCIDRSpec type for use with // apply. -func NamedResourcesIntSlice() *NamedResourcesIntSliceApplyConfiguration { - return &NamedResourcesIntSliceApplyConfiguration{} +func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration { + return &ServiceCIDRSpecApplyConfiguration{} } -// WithInts adds the given value to the Ints field in the declarative configuration +// WithCIDRs adds the given value to the CIDRs field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ints field. -func (b *NamedResourcesIntSliceApplyConfiguration) WithInts(values ...int64) *NamedResourcesIntSliceApplyConfiguration { +// If called multiple times, values provided by each call will be appended to the CIDRs field. +func (b *ServiceCIDRSpecApplyConfiguration) WithCIDRs(values ...string) *ServiceCIDRSpecApplyConfiguration { for i := range values { - b.Ints = append(b.Ints, values[i]) + b.CIDRs = append(b.CIDRs, values[i]) } return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go new file mode 100644 index 0000000000..f2dd92404d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use +// with apply. +type ServiceCIDRStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ServiceCIDRStatusApplyConfiguration constructs a declarative configuration of the ServiceCIDRStatus type for use with +// apply. +func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration { + return &ServiceCIDRStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. 
+func (b *ServiceCIDRStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ServiceCIDRStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go index 9eec002671..6694538fc3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. type OverheadApplyConfiguration struct { PodFixed *v1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go index 3c9d1fc467..6ce01a319c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type RuntimeClassApplyConfiguration struct { Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -263,3 +263,9 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo b.Scheduling = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go index e01db85d7b..2d084e0f59 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. 
type SchedulingApplyConfiguration struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go index 1ddaa64acc..84770a0920 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. type OverheadApplyConfiguration struct { PodFixed *v1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go index e680e12deb..9f139ee1b6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RuntimeClassApplyConfiguration struct { Spec *RuntimeClassSpecApplyConfiguration `json:"spec,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *RuntimeClassApplyConfiguration) WithSpec(value *RuntimeClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go index 86e8585ad3..1aa43eb132 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1alpha1 -// RuntimeClassSpecApplyConfiguration represents an declarative configuration of the RuntimeClassSpec type for use +// RuntimeClassSpecApplyConfiguration represents a declarative configuration of the RuntimeClassSpec type for use // with apply. type RuntimeClassSpecApplyConfiguration struct { RuntimeHandler *string `json:"runtimeHandler,omitempty"` @@ -26,7 +26,7 @@ type RuntimeClassSpecApplyConfiguration struct { Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClassSpecApplyConfiguration constructs an declarative configuration of the RuntimeClassSpec type for use with +// RuntimeClassSpecApplyConfiguration constructs a declarative configuration of the RuntimeClassSpec type for use with // apply. func RuntimeClassSpec() *RuntimeClassSpecApplyConfiguration { return &RuntimeClassSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go index d4117d6bc7..6ce49ad866 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. type SchedulingApplyConfiguration struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go index e8c4895505..cf767e702e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. type OverheadApplyConfiguration struct { PodFixed *v1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. 
func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go index f5487665c3..fa6c9f45bf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type RuntimeClassApplyConfiguration struct { Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -263,3 +263,9 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo b.Scheduling = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go index 10831d0ff5..23d0b97527 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. type SchedulingApplyConfiguration struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go index 76a9533a6f..3a051619f8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use +// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use // with apply. 
type EvictionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type EvictionApplyConfiguration struct { DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` } -// Eviction constructs an declarative configuration of the Eviction type for use with +// Eviction constructs a declarative configuration of the Eviction type for use with // apply. func Eviction(name, namespace string) *EvictionApplyConfiguration { b := &EvictionApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp b.DeleteOptions = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EvictionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go index 6b547c2695..a765a7b623 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetApplyConfiguration represents an declarative configuration of the PodDisruptionBudget type for use +// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use // with apply. type PodDisruptionBudgetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PodDisruptionBudgetApplyConfiguration struct { Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"` } -// PodDisruptionBudget constructs an declarative configuration of the PodDisruptionBudget type for use with +// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with // apply. func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfiguration { b := &PodDisruptionBudgetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go index 67d9ba6bba..2917145451 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use +// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. 
type PodDisruptionBudgetSpecApplyConfiguration struct { MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` @@ -33,7 +33,7 @@ type PodDisruptionBudgetSpecApplyConfiguration struct { UnhealthyPodEvictionPolicy *policyv1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } -// PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with +// PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with // apply. func PodDisruptionBudgetSpec() *PodDisruptionBudgetSpecApplyConfiguration { return &PodDisruptionBudgetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go index 2dd427b9e1..d0f9baf41c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetStatusApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetStatus type for use +// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use // with apply. type PodDisruptionBudgetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -35,7 +35,7 @@ type PodDisruptionBudgetStatusApplyConfiguration struct { Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// PodDisruptionBudgetStatusApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetStatus type for use with +// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with // apply. func PodDisruptionBudgetStatus() *PodDisruptionBudgetStatusApplyConfiguration { return &PodDisruptionBudgetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go index d2a361d1b5..d4121af206 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use +// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use // with apply. type EvictionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type EvictionApplyConfiguration struct { DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` } -// Eviction constructs an declarative configuration of the Eviction type for use with +// Eviction constructs a declarative configuration of the Eviction type for use with // apply. 
func Eviction(name, namespace string) *EvictionApplyConfiguration { b := &EvictionApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp b.DeleteOptions = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EvictionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go index cef51a279c..813b57bae7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetApplyConfiguration represents an declarative configuration of the PodDisruptionBudget type for use +// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use // with apply. type PodDisruptionBudgetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PodDisruptionBudgetApplyConfiguration struct { Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"` } -// PodDisruptionBudget constructs an declarative configuration of the PodDisruptionBudget type for use with +// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with // apply. func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfiguration { b := &PodDisruptionBudgetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go index 0ba3ea1c2e..405f1148b0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use +// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. type PodDisruptionBudgetSpecApplyConfiguration struct { MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` @@ -33,7 +33,7 @@ type PodDisruptionBudgetSpecApplyConfiguration struct { UnhealthyPodEvictionPolicy *v1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } -// PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with +// PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with // apply. 
func PodDisruptionBudgetSpec() *PodDisruptionBudgetSpecApplyConfiguration { return &PodDisruptionBudgetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go index d0813590e1..e66a7fb386 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetStatusApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetStatus type for use +// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use // with apply. type PodDisruptionBudgetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -35,7 +35,7 @@ type PodDisruptionBudgetStatusApplyConfiguration struct { Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// PodDisruptionBudgetStatusApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetStatus type for use with +// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with // apply. func PodDisruptionBudgetStatus() *PodDisruptionBudgetStatusApplyConfiguration { return &PodDisruptionBudgetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go index fda9205c21..5ae4dc37ff 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go index 3a5660fe19..c5b0075ec0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. 
type ClusterRoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct { AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go index 625ad72c44..91a9d5df31 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. type ClusterRoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go index 65ee1d4fe5..a2e66d1096 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. 
func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go index 97df25fb65..b51f904267 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct { Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go index 7270f07e49..e59c8e6d30 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. type RoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go index ef03a48827..646a3bb194 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. 
type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go index ebc87fdc45..e1d9c5cfb8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go index 63cdc3fcca..ff4aeb59e5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go index 19b1180fad..dc0e34e53b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. 
type ClusterRoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct { AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go index a1723efc35..d3c12ec508 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. type ClusterRoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go index 12143af130..89d7a2914f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. 
type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go index cd256397a2..db0a4f7169 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct { Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go index a0ec20d0b1..8efcddd69d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. type RoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go index 40dbc33073..4b2553117d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go index 46640dbbe9..665b42af50 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go index d52ac3db9b..e9bb68dcb6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. 
func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go index cf714ecc27..5e9c238540 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. type ClusterRoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct { AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go index b97cbcba2f..2f088b93e5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. type ClusterRoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go index c63dc68c6b..dc630df206 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go index 53a751eb34..4b1b6112bd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct { Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go index ecccdf91b1..246928553f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. 
type RoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go index e6a02dc602..19d0420a81 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go index b616da8b13..f7c1a21a9c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go deleted file mode 100644 index bc6078aa94..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// AllocationResultApplyConfiguration represents an declarative configuration of the AllocationResult type for use -// with apply. -type AllocationResultApplyConfiguration struct { - ResourceHandles []ResourceHandleApplyConfiguration `json:"resourceHandles,omitempty"` - AvailableOnNodes *v1.NodeSelectorApplyConfiguration `json:"availableOnNodes,omitempty"` - Shareable *bool `json:"shareable,omitempty"` -} - -// AllocationResultApplyConfiguration constructs an declarative configuration of the AllocationResult type for use with -// apply. -func AllocationResult() *AllocationResultApplyConfiguration { - return &AllocationResultApplyConfiguration{} -} - -// WithResourceHandles adds the given value to the ResourceHandles field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ResourceHandles field. -func (b *AllocationResultApplyConfiguration) WithResourceHandles(values ...*ResourceHandleApplyConfiguration) *AllocationResultApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithResourceHandles") - } - b.ResourceHandles = append(b.ResourceHandles, *values[i]) - } - return b -} - -// WithAvailableOnNodes sets the AvailableOnNodes field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AvailableOnNodes field is set to the value of the last call. -func (b *AllocationResultApplyConfiguration) WithAvailableOnNodes(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration { - b.AvailableOnNodes = value - return b -} - -// WithShareable sets the Shareable field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Shareable field is set to the value of the last call. -func (b *AllocationResultApplyConfiguration) WithShareable(value bool) *AllocationResultApplyConfiguration { - b.Shareable = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresultmodel.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresultmodel.go deleted file mode 100644 index 0c8be0e6aa..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresultmodel.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// AllocationResultModelApplyConfiguration represents an declarative configuration of the AllocationResultModel type for use -// with apply. -type AllocationResultModelApplyConfiguration struct { - NamedResources *NamedResourcesAllocationResultApplyConfiguration `json:"namedResources,omitempty"` -} - -// AllocationResultModelApplyConfiguration constructs an declarative configuration of the AllocationResultModel type for use with -// apply. -func AllocationResultModel() *AllocationResultModelApplyConfiguration { - return &AllocationResultModelApplyConfiguration{} -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *AllocationResultModelApplyConfiguration) WithNamedResources(value *NamedResourcesAllocationResultApplyConfiguration) *AllocationResultModelApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverallocationresult.go deleted file mode 100644 index a1f082fad7..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverallocationresult.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DriverAllocationResultApplyConfiguration represents an declarative configuration of the DriverAllocationResult type for use -// with apply. -type DriverAllocationResultApplyConfiguration struct { - VendorRequestParameters *runtime.RawExtension `json:"vendorRequestParameters,omitempty"` - AllocationResultModelApplyConfiguration `json:",inline"` -} - -// DriverAllocationResultApplyConfiguration constructs an declarative configuration of the DriverAllocationResult type for use with -// apply. -func DriverAllocationResult() *DriverAllocationResultApplyConfiguration { - return &DriverAllocationResultApplyConfiguration{} -} - -// WithVendorRequestParameters sets the VendorRequestParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the VendorRequestParameters field is set to the value of the last call. -func (b *DriverAllocationResultApplyConfiguration) WithVendorRequestParameters(value runtime.RawExtension) *DriverAllocationResultApplyConfiguration { - b.VendorRequestParameters = &value - return b -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *DriverAllocationResultApplyConfiguration) WithNamedResources(value *NamedResourcesAllocationResultApplyConfiguration) *DriverAllocationResultApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverrequests.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverrequests.go deleted file mode 100644 index 8052915784..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverrequests.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DriverRequestsApplyConfiguration represents an declarative configuration of the DriverRequests type for use -// with apply. -type DriverRequestsApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` - VendorParameters *runtime.RawExtension `json:"vendorParameters,omitempty"` - Requests []ResourceRequestApplyConfiguration `json:"requests,omitempty"` -} - -// DriverRequestsApplyConfiguration constructs an declarative configuration of the DriverRequests type for use with -// apply. -func DriverRequests() *DriverRequestsApplyConfiguration { - return &DriverRequestsApplyConfiguration{} -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *DriverRequestsApplyConfiguration) WithDriverName(value string) *DriverRequestsApplyConfiguration { - b.DriverName = &value - return b -} - -// WithVendorParameters sets the VendorParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VendorParameters field is set to the value of the last call. 
-func (b *DriverRequestsApplyConfiguration) WithVendorParameters(value runtime.RawExtension) *DriverRequestsApplyConfiguration { - b.VendorParameters = &value - return b -} - -// WithRequests adds the given value to the Requests field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Requests field. -func (b *DriverRequestsApplyConfiguration) WithRequests(values ...*ResourceRequestApplyConfiguration) *DriverRequestsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRequests") - } - b.Requests = append(b.Requests, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattribute.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattribute.go deleted file mode 100644 index d9545d054f..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattribute.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" -) - -// NamedResourcesAttributeApplyConfiguration represents an declarative configuration of the NamedResourcesAttribute type for use -// with apply. -type NamedResourcesAttributeApplyConfiguration struct { - Name *string `json:"name,omitempty"` - NamedResourcesAttributeValueApplyConfiguration `json:",inline"` -} - -// NamedResourcesAttributeApplyConfiguration constructs an declarative configuration of the NamedResourcesAttribute type for use with -// apply. -func NamedResourcesAttribute() *NamedResourcesAttributeApplyConfiguration { - return &NamedResourcesAttributeApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithName(value string) *NamedResourcesAttributeApplyConfiguration { - b.Name = &value - return b -} - -// WithQuantityValue sets the QuantityValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the QuantityValue field is set to the value of the last call. 
-func (b *NamedResourcesAttributeApplyConfiguration) WithQuantityValue(value resource.Quantity) *NamedResourcesAttributeApplyConfiguration { - b.QuantityValue = &value - return b -} - -// WithBoolValue sets the BoolValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the BoolValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithBoolValue(value bool) *NamedResourcesAttributeApplyConfiguration { - b.BoolValue = &value - return b -} - -// WithIntValue sets the IntValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IntValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithIntValue(value int64) *NamedResourcesAttributeApplyConfiguration { - b.IntValue = &value - return b -} - -// WithIntSliceValue sets the IntSliceValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IntSliceValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithIntSliceValue(value *NamedResourcesIntSliceApplyConfiguration) *NamedResourcesAttributeApplyConfiguration { - b.IntSliceValue = value - return b -} - -// WithStringValue sets the StringValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StringValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithStringValue(value string) *NamedResourcesAttributeApplyConfiguration { - b.StringValue = &value - return b -} - -// WithStringSliceValue sets the StringSliceValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StringSliceValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithStringSliceValue(value *NamedResourcesStringSliceApplyConfiguration) *NamedResourcesAttributeApplyConfiguration { - b.StringSliceValue = value - return b -} - -// WithVersionValue sets the VersionValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VersionValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithVersionValue(value string) *NamedResourcesAttributeApplyConfiguration { - b.VersionValue = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattributevalue.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattributevalue.go deleted file mode 100644 index e0b19650a9..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattributevalue.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" -) - -// NamedResourcesAttributeValueApplyConfiguration represents an declarative configuration of the NamedResourcesAttributeValue type for use -// with apply. -type NamedResourcesAttributeValueApplyConfiguration struct { - QuantityValue *resource.Quantity `json:"quantity,omitempty"` - BoolValue *bool `json:"bool,omitempty"` - IntValue *int64 `json:"int,omitempty"` - IntSliceValue *NamedResourcesIntSliceApplyConfiguration `json:"intSlice,omitempty"` - StringValue *string `json:"string,omitempty"` - StringSliceValue *NamedResourcesStringSliceApplyConfiguration `json:"stringSlice,omitempty"` - VersionValue *string `json:"version,omitempty"` -} - -// NamedResourcesAttributeValueApplyConfiguration constructs an declarative configuration of the NamedResourcesAttributeValue type for use with -// apply. -func NamedResourcesAttributeValue() *NamedResourcesAttributeValueApplyConfiguration { - return &NamedResourcesAttributeValueApplyConfiguration{} -} - -// WithQuantityValue sets the QuantityValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the QuantityValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithQuantityValue(value resource.Quantity) *NamedResourcesAttributeValueApplyConfiguration { - b.QuantityValue = &value - return b -} - -// WithBoolValue sets the BoolValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the BoolValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithBoolValue(value bool) *NamedResourcesAttributeValueApplyConfiguration { - b.BoolValue = &value - return b -} - -// WithIntValue sets the IntValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IntValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithIntValue(value int64) *NamedResourcesAttributeValueApplyConfiguration { - b.IntValue = &value - return b -} - -// WithIntSliceValue sets the IntSliceValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IntSliceValue field is set to the value of the last call. 
-func (b *NamedResourcesAttributeValueApplyConfiguration) WithIntSliceValue(value *NamedResourcesIntSliceApplyConfiguration) *NamedResourcesAttributeValueApplyConfiguration { - b.IntSliceValue = value - return b -} - -// WithStringValue sets the StringValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StringValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithStringValue(value string) *NamedResourcesAttributeValueApplyConfiguration { - b.StringValue = &value - return b -} - -// WithStringSliceValue sets the StringSliceValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StringSliceValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithStringSliceValue(value *NamedResourcesStringSliceApplyConfiguration) *NamedResourcesAttributeValueApplyConfiguration { - b.StringSliceValue = value - return b -} - -// WithVersionValue sets the VersionValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VersionValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithVersionValue(value string) *NamedResourcesAttributeValueApplyConfiguration { - b.VersionValue = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesfilter.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesfilter.go deleted file mode 100644 index e483d8622f..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesfilter.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// NamedResourcesFilterApplyConfiguration represents an declarative configuration of the NamedResourcesFilter type for use -// with apply. -type NamedResourcesFilterApplyConfiguration struct { - Selector *string `json:"selector,omitempty"` -} - -// NamedResourcesFilterApplyConfiguration constructs an declarative configuration of the NamedResourcesFilter type for use with -// apply. -func NamedResourcesFilter() *NamedResourcesFilterApplyConfiguration { - return &NamedResourcesFilterApplyConfiguration{} -} - -// WithSelector sets the Selector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Selector field is set to the value of the last call. 
-func (b *NamedResourcesFilterApplyConfiguration) WithSelector(value string) *NamedResourcesFilterApplyConfiguration { - b.Selector = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesrequest.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesrequest.go deleted file mode 100644 index 5adfd84ee5..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesrequest.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// NamedResourcesRequestApplyConfiguration represents an declarative configuration of the NamedResourcesRequest type for use -// with apply. -type NamedResourcesRequestApplyConfiguration struct { - Selector *string `json:"selector,omitempty"` -} - -// NamedResourcesRequestApplyConfiguration constructs an declarative configuration of the NamedResourcesRequest type for use with -// apply. -func NamedResourcesRequest() *NamedResourcesRequestApplyConfiguration { - return &NamedResourcesRequestApplyConfiguration{} -} - -// WithSelector sets the Selector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Selector field is set to the value of the last call. -func (b *NamedResourcesRequestApplyConfiguration) WithSelector(value string) *NamedResourcesRequestApplyConfiguration { - b.Selector = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesresources.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesresources.go deleted file mode 100644 index f01ff8699a..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesresources.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// NamedResourcesResourcesApplyConfiguration represents an declarative configuration of the NamedResourcesResources type for use -// with apply. 
-type NamedResourcesResourcesApplyConfiguration struct { - Instances []NamedResourcesInstanceApplyConfiguration `json:"instances,omitempty"` -} - -// NamedResourcesResourcesApplyConfiguration constructs an declarative configuration of the NamedResourcesResources type for use with -// apply. -func NamedResourcesResources() *NamedResourcesResourcesApplyConfiguration { - return &NamedResourcesResourcesApplyConfiguration{} -} - -// WithInstances adds the given value to the Instances field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Instances field. -func (b *NamedResourcesResourcesApplyConfiguration) WithInstances(values ...*NamedResourcesInstanceApplyConfiguration) *NamedResourcesResourcesApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithInstances") - } - b.Instances = append(b.Instances, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesstringslice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesstringslice.go deleted file mode 100644 index 1e93873546..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesstringslice.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// NamedResourcesStringSliceApplyConfiguration represents an declarative configuration of the NamedResourcesStringSlice type for use -// with apply. -type NamedResourcesStringSliceApplyConfiguration struct { - Strings []string `json:"strings,omitempty"` -} - -// NamedResourcesStringSliceApplyConfiguration constructs an declarative configuration of the NamedResourcesStringSlice type for use with -// apply. -func NamedResourcesStringSlice() *NamedResourcesStringSliceApplyConfiguration { - return &NamedResourcesStringSliceApplyConfiguration{} -} - -// WithStrings adds the given value to the Strings field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Strings field. 
-func (b *NamedResourcesStringSliceApplyConfiguration) WithStrings(values ...string) *NamedResourcesStringSliceApplyConfiguration { - for i := range values { - b.Strings = append(b.Strings, values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparameters.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparameters.go deleted file mode 100644 index ea13570e33..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparameters.go +++ /dev/null @@ -1,272 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ResourceClaimParametersApplyConfiguration represents an declarative configuration of the ResourceClaimParameters type for use -// with apply. -type ResourceClaimParametersApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - GeneratedFrom *ResourceClaimParametersReferenceApplyConfiguration `json:"generatedFrom,omitempty"` - Shareable *bool `json:"shareable,omitempty"` - DriverRequests []DriverRequestsApplyConfiguration `json:"driverRequests,omitempty"` -} - -// ResourceClaimParameters constructs an declarative configuration of the ResourceClaimParameters type for use with -// apply. -func ResourceClaimParameters(name, namespace string) *ResourceClaimParametersApplyConfiguration { - b := &ResourceClaimParametersApplyConfiguration{} - b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("ResourceClaimParameters") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b -} - -// ExtractResourceClaimParameters extracts the applied configuration owned by fieldManager from -// resourceClaimParameters. If no managedFields are found in resourceClaimParameters for fieldManager, a -// ResourceClaimParametersApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// resourceClaimParameters must be a unmodified ResourceClaimParameters API object that was retrieved from the Kubernetes API. -// ExtractResourceClaimParameters provides a way to perform a extract/modify-in-place/apply workflow. 
-// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractResourceClaimParameters(resourceClaimParameters *resourcev1alpha2.ResourceClaimParameters, fieldManager string) (*ResourceClaimParametersApplyConfiguration, error) { - return extractResourceClaimParameters(resourceClaimParameters, fieldManager, "") -} - -// ExtractResourceClaimParametersStatus is the same as ExtractResourceClaimParameters except -// that it extracts the status subresource applied configuration. -// Experimental! -func ExtractResourceClaimParametersStatus(resourceClaimParameters *resourcev1alpha2.ResourceClaimParameters, fieldManager string) (*ResourceClaimParametersApplyConfiguration, error) { - return extractResourceClaimParameters(resourceClaimParameters, fieldManager, "status") -} - -func extractResourceClaimParameters(resourceClaimParameters *resourcev1alpha2.ResourceClaimParameters, fieldManager string, subresource string) (*ResourceClaimParametersApplyConfiguration, error) { - b := &ResourceClaimParametersApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaimParameters, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimParameters"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(resourceClaimParameters.Name) - b.WithNamespace(resourceClaimParameters.Namespace) - - b.WithKind("ResourceClaimParameters") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithKind(value string) *ResourceClaimParametersApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithAPIVersion(value string) *ResourceClaimParametersApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithName(value string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. 
-func (b *ResourceClaimParametersApplyConfiguration) WithGenerateName(value string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithNamespace(value string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithUID(value types.UID) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithResourceVersion(value string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithGeneration(value int64) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *ResourceClaimParametersApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ResourceClaimParametersApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ResourceClaimParametersApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *ResourceClaimParametersApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ResourceClaimParametersApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithGeneratedFrom sets the GeneratedFrom field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the GeneratedFrom field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithGeneratedFrom(value *ResourceClaimParametersReferenceApplyConfiguration) *ResourceClaimParametersApplyConfiguration { - b.GeneratedFrom = value - return b -} - -// WithShareable sets the Shareable field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Shareable field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithShareable(value bool) *ResourceClaimParametersApplyConfiguration { - b.Shareable = &value - return b -} - -// WithDriverRequests adds the given value to the DriverRequests field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the DriverRequests field. -func (b *ResourceClaimParametersApplyConfiguration) WithDriverRequests(values ...*DriverRequestsApplyConfiguration) *ResourceClaimParametersApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithDriverRequests") - } - b.DriverRequests = append(b.DriverRequests, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go deleted file mode 100644 index 27820ede60..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceClaimParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimParametersReference type for use -// with apply. -type ResourceClaimParametersReferenceApplyConfiguration struct { - APIGroup *string `json:"apiGroup,omitempty"` - Kind *string `json:"kind,omitempty"` - Name *string `json:"name,omitempty"` -} - -// ResourceClaimParametersReferenceApplyConfiguration constructs an declarative configuration of the ResourceClaimParametersReference type for use with -// apply. -func ResourceClaimParametersReference() *ResourceClaimParametersReferenceApplyConfiguration { - return &ResourceClaimParametersReferenceApplyConfiguration{} -} - -// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIGroup field is set to the value of the last call. 
-func (b *ResourceClaimParametersReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimParametersReferenceApplyConfiguration { - b.APIGroup = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClaimParametersReferenceApplyConfiguration) WithKind(value string) *ResourceClaimParametersReferenceApplyConfiguration { - b.Kind = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClaimParametersReferenceApplyConfiguration) WithName(value string) *ResourceClaimParametersReferenceApplyConfiguration { - b.Name = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go deleted file mode 100644 index 0c73e64e9e..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" -) - -// ResourceClaimSpecApplyConfiguration represents an declarative configuration of the ResourceClaimSpec type for use -// with apply. -type ResourceClaimSpecApplyConfiguration struct { - ResourceClassName *string `json:"resourceClassName,omitempty"` - ParametersRef *ResourceClaimParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` - AllocationMode *resourcev1alpha2.AllocationMode `json:"allocationMode,omitempty"` -} - -// ResourceClaimSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimSpec type for use with -// apply. -func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration { - return &ResourceClaimSpecApplyConfiguration{} -} - -// WithResourceClassName sets the ResourceClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceClassName field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithResourceClassName(value string) *ResourceClaimSpecApplyConfiguration { - b.ResourceClassName = &value - return b -} - -// WithParametersRef sets the ParametersRef field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the ParametersRef field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithParametersRef(value *ResourceClaimParametersReferenceApplyConfiguration) *ResourceClaimSpecApplyConfiguration { - b.ParametersRef = value - return b -} - -// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllocationMode field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha2.AllocationMode) *ResourceClaimSpecApplyConfiguration { - b.AllocationMode = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go deleted file mode 100644 index 364fda9d00..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go +++ /dev/null @@ -1,275 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ResourceClassApplyConfiguration represents an declarative configuration of the ResourceClass type for use -// with apply. -type ResourceClassApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - DriverName *string `json:"driverName,omitempty"` - ParametersRef *ResourceClassParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` - SuitableNodes *corev1.NodeSelectorApplyConfiguration `json:"suitableNodes,omitempty"` - StructuredParameters *bool `json:"structuredParameters,omitempty"` -} - -// ResourceClass constructs an declarative configuration of the ResourceClass type for use with -// apply. -func ResourceClass(name string) *ResourceClassApplyConfiguration { - b := &ResourceClassApplyConfiguration{} - b.WithName(name) - b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b -} - -// ExtractResourceClass extracts the applied configuration owned by fieldManager from -// resourceClass. If no managedFields are found in resourceClass for fieldManager, a -// ResourceClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. 
It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// resourceClass must be a unmodified ResourceClass API object that was retrieved from the Kubernetes API. -// ExtractResourceClass provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { - return extractResourceClass(resourceClass, fieldManager, "") -} - -// ExtractResourceClassStatus is the same as ExtractResourceClass except -// that it extracts the status subresource applied configuration. -// Experimental! -func ExtractResourceClassStatus(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { - return extractResourceClass(resourceClass, fieldManager, "status") -} - -func extractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { - b := &ResourceClassApplyConfiguration{} - err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClass"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(resourceClass.Name) - - b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithKind(value string) *ResourceClassApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithAPIVersion(value string) *ResourceClassApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithName(value string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. 
-func (b *ResourceClassApplyConfiguration) WithGenerateName(value string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithNamespace(value string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithUID(value types.UID) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithResourceVersion(value string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithGeneration(value int64) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
-func (b *ResourceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *ResourceClassApplyConfiguration) WithLabels(entries map[string]string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ResourceClassApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ResourceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *ResourceClassApplyConfiguration) WithFinalizers(values ...string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ResourceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. 
-func (b *ResourceClassApplyConfiguration) WithDriverName(value string) *ResourceClassApplyConfiguration { - b.DriverName = &value - return b -} - -// WithParametersRef sets the ParametersRef field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ParametersRef field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithParametersRef(value *ResourceClassParametersReferenceApplyConfiguration) *ResourceClassApplyConfiguration { - b.ParametersRef = value - return b -} - -// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SuitableNodes field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithSuitableNodes(value *corev1.NodeSelectorApplyConfiguration) *ResourceClassApplyConfiguration { - b.SuitableNodes = value - return b -} - -// WithStructuredParameters sets the StructuredParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StructuredParameters field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithStructuredParameters(value bool) *ResourceClassApplyConfiguration { - b.StructuredParameters = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparameters.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparameters.go deleted file mode 100644 index 028d0d612d..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparameters.go +++ /dev/null @@ -1,277 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ResourceClassParametersApplyConfiguration represents an declarative configuration of the ResourceClassParameters type for use -// with apply. 
-type ResourceClassParametersApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - GeneratedFrom *ResourceClassParametersReferenceApplyConfiguration `json:"generatedFrom,omitempty"` - VendorParameters []VendorParametersApplyConfiguration `json:"vendorParameters,omitempty"` - Filters []ResourceFilterApplyConfiguration `json:"filters,omitempty"` -} - -// ResourceClassParameters constructs an declarative configuration of the ResourceClassParameters type for use with -// apply. -func ResourceClassParameters(name, namespace string) *ResourceClassParametersApplyConfiguration { - b := &ResourceClassParametersApplyConfiguration{} - b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("ResourceClassParameters") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b -} - -// ExtractResourceClassParameters extracts the applied configuration owned by fieldManager from -// resourceClassParameters. If no managedFields are found in resourceClassParameters for fieldManager, a -// ResourceClassParametersApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// resourceClassParameters must be a unmodified ResourceClassParameters API object that was retrieved from the Kubernetes API. -// ExtractResourceClassParameters provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractResourceClassParameters(resourceClassParameters *resourcev1alpha2.ResourceClassParameters, fieldManager string) (*ResourceClassParametersApplyConfiguration, error) { - return extractResourceClassParameters(resourceClassParameters, fieldManager, "") -} - -// ExtractResourceClassParametersStatus is the same as ExtractResourceClassParameters except -// that it extracts the status subresource applied configuration. -// Experimental! -func ExtractResourceClassParametersStatus(resourceClassParameters *resourcev1alpha2.ResourceClassParameters, fieldManager string) (*ResourceClassParametersApplyConfiguration, error) { - return extractResourceClassParameters(resourceClassParameters, fieldManager, "status") -} - -func extractResourceClassParameters(resourceClassParameters *resourcev1alpha2.ResourceClassParameters, fieldManager string, subresource string) (*ResourceClassParametersApplyConfiguration, error) { - b := &ResourceClassParametersApplyConfiguration{} - err := managedfields.ExtractInto(resourceClassParameters, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClassParameters"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(resourceClassParameters.Name) - b.WithNamespace(resourceClassParameters.Namespace) - - b.WithKind("ResourceClassParameters") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithKind(value string) *ResourceClassParametersApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithAPIVersion(value string) *ResourceClassParametersApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithName(value string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithGenerateName(value string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithNamespace(value string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithUID(value types.UID) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithResourceVersion(value string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. 
-func (b *ResourceClassParametersApplyConfiguration) WithGeneration(value int64) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *ResourceClassParametersApplyConfiguration) WithLabels(entries map[string]string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ResourceClassParametersApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ResourceClassParametersApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *ResourceClassParametersApplyConfiguration) WithFinalizers(values ...string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ResourceClassParametersApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithGeneratedFrom sets the GeneratedFrom field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GeneratedFrom field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithGeneratedFrom(value *ResourceClassParametersReferenceApplyConfiguration) *ResourceClassParametersApplyConfiguration { - b.GeneratedFrom = value - return b -} - -// WithVendorParameters adds the given value to the VendorParameters field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the VendorParameters field. -func (b *ResourceClassParametersApplyConfiguration) WithVendorParameters(values ...*VendorParametersApplyConfiguration) *ResourceClassParametersApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithVendorParameters") - } - b.VendorParameters = append(b.VendorParameters, *values[i]) - } - return b -} - -// WithFilters adds the given value to the Filters field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Filters field. -func (b *ResourceClassParametersApplyConfiguration) WithFilters(values ...*ResourceFilterApplyConfiguration) *ResourceClassParametersApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithFilters") - } - b.Filters = append(b.Filters, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefilter.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefilter.go deleted file mode 100644 index 15371b44a9..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefilter.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceFilterApplyConfiguration represents an declarative configuration of the ResourceFilter type for use -// with apply. -type ResourceFilterApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` - ResourceFilterModelApplyConfiguration `json:",inline"` -} - -// ResourceFilterApplyConfiguration constructs an declarative configuration of the ResourceFilter type for use with -// apply. -func ResourceFilter() *ResourceFilterApplyConfiguration { - return &ResourceFilterApplyConfiguration{} -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *ResourceFilterApplyConfiguration) WithDriverName(value string) *ResourceFilterApplyConfiguration { - b.DriverName = &value - return b -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *ResourceFilterApplyConfiguration) WithNamedResources(value *NamedResourcesFilterApplyConfiguration) *ResourceFilterApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefiltermodel.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefiltermodel.go deleted file mode 100644 index 4f8d138f71..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefiltermodel.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceFilterModelApplyConfiguration represents an declarative configuration of the ResourceFilterModel type for use -// with apply. -type ResourceFilterModelApplyConfiguration struct { - NamedResources *NamedResourcesFilterApplyConfiguration `json:"namedResources,omitempty"` -} - -// ResourceFilterModelApplyConfiguration constructs an declarative configuration of the ResourceFilterModel type for use with -// apply. 
-func ResourceFilterModel() *ResourceFilterModelApplyConfiguration { - return &ResourceFilterModelApplyConfiguration{} -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *ResourceFilterModelApplyConfiguration) WithNamedResources(value *NamedResourcesFilterApplyConfiguration) *ResourceFilterModelApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go deleted file mode 100644 index b4f3da735d..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceHandleApplyConfiguration represents an declarative configuration of the ResourceHandle type for use -// with apply. -type ResourceHandleApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` - Data *string `json:"data,omitempty"` - StructuredData *StructuredResourceHandleApplyConfiguration `json:"structuredData,omitempty"` -} - -// ResourceHandleApplyConfiguration constructs an declarative configuration of the ResourceHandle type for use with -// apply. -func ResourceHandle() *ResourceHandleApplyConfiguration { - return &ResourceHandleApplyConfiguration{} -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *ResourceHandleApplyConfiguration) WithDriverName(value string) *ResourceHandleApplyConfiguration { - b.DriverName = &value - return b -} - -// WithData sets the Data field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Data field is set to the value of the last call. -func (b *ResourceHandleApplyConfiguration) WithData(value string) *ResourceHandleApplyConfiguration { - b.Data = &value - return b -} - -// WithStructuredData sets the StructuredData field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StructuredData field is set to the value of the last call. 
-func (b *ResourceHandleApplyConfiguration) WithStructuredData(value *StructuredResourceHandleApplyConfiguration) *ResourceHandleApplyConfiguration { - b.StructuredData = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcemodel.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcemodel.go deleted file mode 100644 index 8ad7bdf230..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcemodel.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceModelApplyConfiguration represents an declarative configuration of the ResourceModel type for use -// with apply. -type ResourceModelApplyConfiguration struct { - NamedResources *NamedResourcesResourcesApplyConfiguration `json:"namedResources,omitempty"` -} - -// ResourceModelApplyConfiguration constructs an declarative configuration of the ResourceModel type for use with -// apply. -func ResourceModel() *ResourceModelApplyConfiguration { - return &ResourceModelApplyConfiguration{} -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *ResourceModelApplyConfiguration) WithNamedResources(value *NamedResourcesResourcesApplyConfiguration) *ResourceModelApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequest.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequest.go deleted file mode 100644 index 0243d06f89..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequest.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// ResourceRequestApplyConfiguration represents an declarative configuration of the ResourceRequest type for use -// with apply. 
-type ResourceRequestApplyConfiguration struct { - VendorParameters *runtime.RawExtension `json:"vendorParameters,omitempty"` - ResourceRequestModelApplyConfiguration `json:",inline"` -} - -// ResourceRequestApplyConfiguration constructs an declarative configuration of the ResourceRequest type for use with -// apply. -func ResourceRequest() *ResourceRequestApplyConfiguration { - return &ResourceRequestApplyConfiguration{} -} - -// WithVendorParameters sets the VendorParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VendorParameters field is set to the value of the last call. -func (b *ResourceRequestApplyConfiguration) WithVendorParameters(value runtime.RawExtension) *ResourceRequestApplyConfiguration { - b.VendorParameters = &value - return b -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *ResourceRequestApplyConfiguration) WithNamedResources(value *NamedResourcesRequestApplyConfiguration) *ResourceRequestApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequestmodel.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequestmodel.go deleted file mode 100644 index 35bd1d88fe..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequestmodel.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceRequestModelApplyConfiguration represents an declarative configuration of the ResourceRequestModel type for use -// with apply. -type ResourceRequestModelApplyConfiguration struct { - NamedResources *NamedResourcesRequestApplyConfiguration `json:"namedResources,omitempty"` -} - -// ResourceRequestModelApplyConfiguration constructs an declarative configuration of the ResourceRequestModel type for use with -// apply. -func ResourceRequestModel() *ResourceRequestModelApplyConfiguration { - return &ResourceRequestModelApplyConfiguration{} -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. 
-func (b *ResourceRequestModelApplyConfiguration) WithNamedResources(value *NamedResourcesRequestApplyConfiguration) *ResourceRequestModelApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/structuredresourcehandle.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/structuredresourcehandle.go deleted file mode 100644 index e6efcbfef3..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/structuredresourcehandle.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// StructuredResourceHandleApplyConfiguration represents an declarative configuration of the StructuredResourceHandle type for use -// with apply. -type StructuredResourceHandleApplyConfiguration struct { - VendorClassParameters *runtime.RawExtension `json:"vendorClassParameters,omitempty"` - VendorClaimParameters *runtime.RawExtension `json:"vendorClaimParameters,omitempty"` - NodeName *string `json:"nodeName,omitempty"` - Results []DriverAllocationResultApplyConfiguration `json:"results,omitempty"` -} - -// StructuredResourceHandleApplyConfiguration constructs an declarative configuration of the StructuredResourceHandle type for use with -// apply. -func StructuredResourceHandle() *StructuredResourceHandleApplyConfiguration { - return &StructuredResourceHandleApplyConfiguration{} -} - -// WithVendorClassParameters sets the VendorClassParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VendorClassParameters field is set to the value of the last call. -func (b *StructuredResourceHandleApplyConfiguration) WithVendorClassParameters(value runtime.RawExtension) *StructuredResourceHandleApplyConfiguration { - b.VendorClassParameters = &value - return b -} - -// WithVendorClaimParameters sets the VendorClaimParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VendorClaimParameters field is set to the value of the last call. -func (b *StructuredResourceHandleApplyConfiguration) WithVendorClaimParameters(value runtime.RawExtension) *StructuredResourceHandleApplyConfiguration { - b.VendorClaimParameters = &value - return b -} - -// WithNodeName sets the NodeName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodeName field is set to the value of the last call. 
-func (b *StructuredResourceHandleApplyConfiguration) WithNodeName(value string) *StructuredResourceHandleApplyConfiguration { - b.NodeName = &value - return b -} - -// WithResults adds the given value to the Results field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Results field. -func (b *StructuredResourceHandleApplyConfiguration) WithResults(values ...*DriverAllocationResultApplyConfiguration) *StructuredResourceHandleApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithResults") - } - b.Results = append(b.Results, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go new file mode 100644 index 0000000000..3090b2f9d3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// AllocationResultApplyConfiguration represents a declarative configuration of the AllocationResult type for use +// with apply. +type AllocationResultApplyConfiguration struct { + Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + Controller *string `json:"controller,omitempty"` +} + +// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with +// apply. +func AllocationResult() *AllocationResultApplyConfiguration { + return &AllocationResultApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithDevices(value *DeviceAllocationResultApplyConfiguration) *AllocationResultApplyConfiguration { + b.Devices = value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. 
+func (b *AllocationResultApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithController sets the Controller field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Controller field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithController(value string) *AllocationResultApplyConfiguration { + b.Controller = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go new file mode 100644 index 0000000000..e6b7745082 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "k8s.io/api/resource/v1alpha3" + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// BasicDeviceApplyConfiguration represents a declarative configuration of the BasicDevice type for use +// with apply. +type BasicDeviceApplyConfiguration struct { + Attributes map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"` + Capacity map[v1alpha3.QualifiedName]resource.Quantity `json:"capacity,omitempty"` +} + +// BasicDeviceApplyConfiguration constructs a declarative configuration of the BasicDevice type for use with +// apply. +func BasicDevice() *BasicDeviceApplyConfiguration { + return &BasicDeviceApplyConfiguration{} +} + +// WithAttributes puts the entries into the Attributes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Attributes field, +// overwriting an existing map entries in Attributes field with the same key. +func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration { + if b.Attributes == nil && len(entries) > 0 { + b.Attributes = make(map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.Attributes[k] = v + } + return b +} + +// WithCapacity puts the entries into the Capacity field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Capacity field, +// overwriting an existing map entries in Capacity field with the same key. 
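A minimal sketch of chaining the new AllocationResult builders shown above, assuming a hypothetical driver name and node; DeviceAllocationResult comes from a file added further down in this diff, and the node selector uses the standard core/v1 apply configurations:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	coreapply "k8s.io/client-go/applyconfigurations/core/v1"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func exampleAllocation() *resourceapply.AllocationResultApplyConfiguration {
	return resourceapply.AllocationResult().
		// Per-request device results would be appended here via WithResults.
		WithDevices(resourceapply.DeviceAllocationResult()).
		// Restrict the allocation to a single node.
		WithNodeSelector(coreapply.NodeSelector().
			WithNodeSelectorTerms(coreapply.NodeSelectorTerm().
				WithMatchExpressions(coreapply.NodeSelectorRequirement().
					WithKey("kubernetes.io/hostname").
					WithOperator(corev1.NodeSelectorOpIn).
					WithValues("node-a")))).
		// Only set when a DRA control-plane controller handles allocation.
		WithController("gpu.example.com")
}
```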
+func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[v1alpha3.QualifiedName]resource.Quantity) *BasicDeviceApplyConfiguration { + if b.Capacity == nil && len(entries) > 0 { + b.Capacity = make(map[v1alpha3.QualifiedName]resource.Quantity, len(entries)) + } + for k, v := range entries { + b.Capacity[k] = v + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go new file mode 100644 index 0000000000..c59b6a2e37 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use +// with apply. +type CELDeviceSelectorApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` +} + +// CELDeviceSelectorApplyConfiguration constructs a declarative configuration of the CELDeviceSelector type for use with +// apply. +func CELDeviceSelector() *CELDeviceSelectorApplyConfiguration { + return &CELDeviceSelectorApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *CELDeviceSelectorApplyConfiguration) WithExpression(value string) *CELDeviceSelectorApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go similarity index 50% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesallocationresult.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go index 311edbac80..efdb5f37a9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesallocationresult.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go @@ -16,24 +16,33 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 -// NamedResourcesAllocationResultApplyConfiguration represents an declarative configuration of the NamedResourcesAllocationResult type for use +// DeviceApplyConfiguration represents a declarative configuration of the Device type for use // with apply. 
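The CELDeviceSelector builder added above wraps a single CEL expression that is evaluated against each device. A minimal sketch; `"true"` simply matches every device, and a real selector would test device attributes exposed by the DRA CEL environment:

```go
package example

import (
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

// matchEverything builds a selector whose expression accepts any device.
func matchEverything() *resourceapply.CELDeviceSelectorApplyConfiguration {
	return resourceapply.CELDeviceSelector().WithExpression("true")
}
```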
-type NamedResourcesAllocationResultApplyConfiguration struct { - Name *string `json:"name,omitempty"` +type DeviceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Basic *BasicDeviceApplyConfiguration `json:"basic,omitempty"` } -// NamedResourcesAllocationResultApplyConfiguration constructs an declarative configuration of the NamedResourcesAllocationResult type for use with +// DeviceApplyConfiguration constructs a declarative configuration of the Device type for use with // apply. -func NamedResourcesAllocationResult() *NamedResourcesAllocationResultApplyConfiguration { - return &NamedResourcesAllocationResultApplyConfiguration{} +func Device() *DeviceApplyConfiguration { + return &DeviceApplyConfiguration{} } // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *NamedResourcesAllocationResultApplyConfiguration) WithName(value string) *NamedResourcesAllocationResultApplyConfiguration { +func (b *DeviceApplyConfiguration) WithName(value string) *DeviceApplyConfiguration { b.Name = &value return b } + +// WithBasic sets the Basic field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Basic field is set to the value of the last call. +func (b *DeviceApplyConfiguration) WithBasic(value *BasicDeviceApplyConfiguration) *DeviceApplyConfiguration { + b.Basic = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go new file mode 100644 index 0000000000..342e724ef0 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "k8s.io/api/resource/v1alpha3" +) + +// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use +// with apply. +type DeviceAllocationConfigurationApplyConfiguration struct { + Source *v1alpha3.AllocationConfigSource `json:"source,omitempty"` + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceAllocationConfigurationApplyConfiguration constructs a declarative configuration of the DeviceAllocationConfiguration type for use with +// apply. 
+func DeviceAllocationConfiguration() *DeviceAllocationConfigurationApplyConfiguration { + return &DeviceAllocationConfigurationApplyConfiguration{} +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value v1alpha3.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration { + b.Source = &value + return b +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceAllocationConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceAllocationConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go new file mode 100644 index 0000000000..0cfb264b4e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceAllocationResultApplyConfiguration represents a declarative configuration of the DeviceAllocationResult type for use +// with apply. +type DeviceAllocationResultApplyConfiguration struct { + Results []DeviceRequestAllocationResultApplyConfiguration `json:"results,omitempty"` + Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceAllocationResult type for use with +// apply. +func DeviceAllocationResult() *DeviceAllocationResultApplyConfiguration { + return &DeviceAllocationResultApplyConfiguration{} +} + +// WithResults adds the given value to the Results field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Results field. +func (b *DeviceAllocationResultApplyConfiguration) WithResults(values ...*DeviceRequestAllocationResultApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResults") + } + b.Results = append(b.Results, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceAllocationResultApplyConfiguration) WithConfig(values ...*DeviceAllocationConfigurationApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go new file mode 100644 index 0000000000..6b0b7a40ac --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceAttributeApplyConfiguration represents a declarative configuration of the DeviceAttribute type for use +// with apply. +type DeviceAttributeApplyConfiguration struct { + IntValue *int64 `json:"int,omitempty"` + BoolValue *bool `json:"bool,omitempty"` + StringValue *string `json:"string,omitempty"` + VersionValue *string `json:"version,omitempty"` +} + +// DeviceAttributeApplyConfiguration constructs a declarative configuration of the DeviceAttribute type for use with +// apply. +func DeviceAttribute() *DeviceAttributeApplyConfiguration { + return &DeviceAttributeApplyConfiguration{} +} + +// WithIntValue sets the IntValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IntValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithIntValue(value int64) *DeviceAttributeApplyConfiguration { + b.IntValue = &value + return b +} + +// WithBoolValue sets the BoolValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BoolValue field is set to the value of the last call. 
+func (b *DeviceAttributeApplyConfiguration) WithBoolValue(value bool) *DeviceAttributeApplyConfiguration { + b.BoolValue = &value + return b +} + +// WithStringValue sets the StringValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StringValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithStringValue(value string) *DeviceAttributeApplyConfiguration { + b.StringValue = &value + return b +} + +// WithVersionValue sets the VersionValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VersionValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithVersionValue(value string) *DeviceAttributeApplyConfiguration { + b.VersionValue = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go new file mode 100644 index 0000000000..ce3ab56d8b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClaimApplyConfiguration represents a declarative configuration of the DeviceClaim type for use +// with apply. +type DeviceClaimApplyConfiguration struct { + Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"` + Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"` + Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceClaimApplyConfiguration constructs a declarative configuration of the DeviceClaim type for use with +// apply. +func DeviceClaim() *DeviceClaimApplyConfiguration { + return &DeviceClaimApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimApplyConfiguration) WithRequests(values ...*DeviceRequestApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRequests") + } + b.Requests = append(b.Requests, *values[i]) + } + return b +} + +// WithConstraints adds the given value to the Constraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Constraints field. 
+func (b *DeviceClaimApplyConfiguration) WithConstraints(values ...*DeviceConstraintApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConstraints") + } + b.Constraints = append(b.Constraints, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceClaimApplyConfiguration) WithConfig(values ...*DeviceClaimConfigurationApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go new file mode 100644 index 0000000000..4cabe98599 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClaimConfigurationApplyConfiguration represents a declarative configuration of the DeviceClaimConfiguration type for use +// with apply. +type DeviceClaimConfigurationApplyConfiguration struct { + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClaimConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClaimConfiguration type for use with +// apply. +func DeviceClaimConfiguration() *DeviceClaimConfigurationApplyConfiguration { + return &DeviceClaimConfigurationApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceClaimConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. 
+func (b *DeviceClaimConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClaimConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go new file mode 100644 index 0000000000..abaadbb366 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// DeviceClassApplyConfiguration represents a declarative configuration of the DeviceClass type for use +// with apply. +type DeviceClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"` +} + +// DeviceClass constructs a declarative configuration of the DeviceClass type for use with +// apply. +func DeviceClass(name string) *DeviceClassApplyConfiguration { + b := &DeviceClassApplyConfiguration{} + b.WithName(name) + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b +} + +// ExtractDeviceClass extracts the applied configuration owned by fieldManager from +// deviceClass. If no managedFields are found in deviceClass for fieldManager, a +// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// deviceClass must be a unmodified DeviceClass API object that was retrieved from the Kubernetes API. +// ExtractDeviceClass provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "") +} + +// ExtractDeviceClassStatus is the same as ExtractDeviceClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractDeviceClassStatus(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "status") +} + +func extractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) { + b := &DeviceClassApplyConfiguration{} + err := managedfields.ExtractInto(deviceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha3.DeviceClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(deviceClass.Name) + + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClassApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *DeviceClassApplyConfiguration) WithAnnotations(entries map[string]string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *DeviceClassApplyConfiguration) WithFinalizers(values ...string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *DeviceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithSpec(value *DeviceClassSpecApplyConfiguration) *DeviceClassApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeviceClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go new file mode 100644 index 0000000000..cb3758a3e3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClassConfigurationApplyConfiguration represents a declarative configuration of the DeviceClassConfiguration type for use +// with apply. +type DeviceClassConfigurationApplyConfiguration struct { + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClassConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClassConfiguration type for use with +// apply. +func DeviceClassConfiguration() *DeviceClassConfigurationApplyConfiguration { + return &DeviceClassConfigurationApplyConfiguration{} +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceClassConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClassConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go new file mode 100644 index 0000000000..d40a43de66 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use +// with apply. +type DeviceClassSpecApplyConfiguration struct { + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"` + SuitableNodes *v1.NodeSelectorApplyConfiguration `json:"suitableNodes,omitempty"` +} + +// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with +// apply. 
+func DeviceClassSpec() *DeviceClassSpecApplyConfiguration { + return &DeviceClassSpecApplyConfiguration{} +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *DeviceClassSpecApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceClassSpecApplyConfiguration) WithConfig(values ...*DeviceClassConfigurationApplyConfiguration) *DeviceClassSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} + +// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SuitableNodes field is set to the value of the last call. +func (b *DeviceClassSpecApplyConfiguration) WithSuitableNodes(value *v1.NodeSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration { + b.SuitableNodes = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go new file mode 100644 index 0000000000..62c0d997de --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceConfigurationApplyConfiguration represents a declarative configuration of the DeviceConfiguration type for use +// with apply. +type DeviceConfigurationApplyConfiguration struct { + Opaque *OpaqueDeviceConfigurationApplyConfiguration `json:"opaque,omitempty"` +} + +// DeviceConfigurationApplyConfiguration constructs a declarative configuration of the DeviceConfiguration type for use with +// apply. +func DeviceConfiguration() *DeviceConfigurationApplyConfiguration { + return &DeviceConfigurationApplyConfiguration{} +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go new file mode 100644 index 0000000000..479acd57c2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go @@ -0,0 +1,54 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "k8s.io/api/resource/v1alpha3" +) + +// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use +// with apply. +type DeviceConstraintApplyConfiguration struct { + Requests []string `json:"requests,omitempty"` + MatchAttribute *v1alpha3.FullyQualifiedName `json:"matchAttribute,omitempty"` +} + +// DeviceConstraintApplyConfiguration constructs a declarative configuration of the DeviceConstraint type for use with +// apply. +func DeviceConstraint() *DeviceConstraintApplyConfiguration { + return &DeviceConstraintApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceConstraintApplyConfiguration) WithRequests(values ...string) *DeviceConstraintApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithMatchAttribute sets the MatchAttribute field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchAttribute field is set to the value of the last call. +func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value v1alpha3.FullyQualifiedName) *DeviceConstraintApplyConfiguration { + b.MatchAttribute = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go new file mode 100644 index 0000000000..e5c87efe47 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" +) + +// DeviceRequestApplyConfiguration represents a declarative configuration of the DeviceRequest type for use +// with apply. +type DeviceRequestApplyConfiguration struct { + Name *string `json:"name,omitempty"` + DeviceClassName *string `json:"deviceClassName,omitempty"` + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + AllocationMode *resourcev1alpha3.DeviceAllocationMode `json:"allocationMode,omitempty"` + Count *int64 `json:"count,omitempty"` + AdminAccess *bool `json:"adminAccess,omitempty"` +} + +// DeviceRequestApplyConfiguration constructs a declarative configuration of the DeviceRequest type for use with +// apply. +func DeviceRequest() *DeviceRequestApplyConfiguration { + return &DeviceRequestApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithName(value string) *DeviceRequestApplyConfiguration { + b.Name = &value + return b +} + +// WithDeviceClassName sets the DeviceClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeviceClassName field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithDeviceClassName(value string) *DeviceRequestApplyConfiguration { + b.DeviceClassName = &value + return b +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *DeviceRequestApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceRequestApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} + +// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocationMode field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithAllocationMode(value resourcev1alpha3.DeviceAllocationMode) *DeviceRequestApplyConfiguration { + b.AllocationMode = &value + return b +} + +// WithCount sets the Count field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Count field is set to the value of the last call. 
+func (b *DeviceRequestApplyConfiguration) WithCount(value int64) *DeviceRequestApplyConfiguration { + b.Count = &value + return b +} + +// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdminAccess field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestApplyConfiguration { + b.AdminAccess = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go new file mode 100644 index 0000000000..712b9bf9b1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use +// with apply. +type DeviceRequestAllocationResultApplyConfiguration struct { + Request *string `json:"request,omitempty"` + Driver *string `json:"driver,omitempty"` + Pool *string `json:"pool,omitempty"` + Device *string `json:"device,omitempty"` +} + +// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with +// apply. +func DeviceRequestAllocationResult() *DeviceRequestAllocationResultApplyConfiguration { + return &DeviceRequestAllocationResultApplyConfiguration{} +} + +// WithRequest sets the Request field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Request field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithRequest(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Request = &value + return b +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDriver(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. 
+func (b *DeviceRequestAllocationResultApplyConfiguration) WithPool(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Pool = &value + return b +} + +// WithDevice sets the Device field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Device field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDevice(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Device = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go new file mode 100644 index 0000000000..574299d15e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use +// with apply. +type DeviceSelectorApplyConfiguration struct { + CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"` +} + +// DeviceSelectorApplyConfiguration constructs a declarative configuration of the DeviceSelector type for use with +// apply. +func DeviceSelector() *DeviceSelectorApplyConfiguration { + return &DeviceSelectorApplyConfiguration{} +} + +// WithCEL sets the CEL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CEL field is set to the value of the last call. +func (b *DeviceSelectorApplyConfiguration) WithCEL(value *CELDeviceSelectorApplyConfiguration) *DeviceSelectorApplyConfiguration { + b.CEL = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/vendorparameters.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go similarity index 54% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/vendorparameters.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go index f7a8ff9ece..caf9d059c3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/vendorparameters.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go @@ -16,37 +16,37 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1alpha2 +package v1alpha3 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// VendorParametersApplyConfiguration represents an declarative configuration of the VendorParameters type for use +// OpaqueDeviceConfigurationApplyConfiguration represents a declarative configuration of the OpaqueDeviceConfiguration type for use // with apply. -type VendorParametersApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` +type OpaqueDeviceConfigurationApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` Parameters *runtime.RawExtension `json:"parameters,omitempty"` } -// VendorParametersApplyConfiguration constructs an declarative configuration of the VendorParameters type for use with +// OpaqueDeviceConfigurationApplyConfiguration constructs a declarative configuration of the OpaqueDeviceConfiguration type for use with // apply. -func VendorParameters() *VendorParametersApplyConfiguration { - return &VendorParametersApplyConfiguration{} +func OpaqueDeviceConfiguration() *OpaqueDeviceConfigurationApplyConfiguration { + return &OpaqueDeviceConfigurationApplyConfiguration{} } -// WithDriverName sets the DriverName field in the declarative configuration to the given value +// WithDriver sets the Driver field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *VendorParametersApplyConfiguration) WithDriverName(value string) *VendorParametersApplyConfiguration { - b.DriverName = &value +// If called multiple times, the Driver field is set to the value of the last call. +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithDriver(value string) *OpaqueDeviceConfigurationApplyConfiguration { + b.Driver = &value return b } // WithParameters sets the Parameters field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Parameters field is set to the value of the last call. -func (b *VendorParametersApplyConfiguration) WithParameters(value runtime.RawExtension) *VendorParametersApplyConfiguration { +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithParameters(value runtime.RawExtension) *OpaqueDeviceConfigurationApplyConfiguration { b.Parameters = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go similarity index 93% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go index 1dfb6ff97b..ee8e73ebe2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1alpha2 +package v1alpha3 import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSchedulingContextApplyConfiguration represents an declarative configuration of the PodSchedulingContext type for use +// PodSchedulingContextApplyConfiguration represents a declarative configuration of the PodSchedulingContext type for use // with apply. type PodSchedulingContextApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,14 +36,14 @@ type PodSchedulingContextApplyConfiguration struct { Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"` } -// PodSchedulingContext constructs an declarative configuration of the PodSchedulingContext type for use with +// PodSchedulingContext constructs a declarative configuration of the PodSchedulingContext type for use with // apply. func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration { b := &PodSchedulingContextApplyConfiguration{} b.WithName(name) b.WithNamespace(namespace) b.WithKind("PodSchedulingContext") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b } @@ -58,20 +58,20 @@ func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConf // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { +func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { return extractPodSchedulingContext(podSchedulingContext, fieldManager, "") } // ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { +func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status") } -func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { +func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { b := &PodSchedulingContextApplyConfiguration{} - err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha2.PodSchedulingContext"), fieldManager, b, subresource) + err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha3.PodSchedulingContext"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSched b.WithNamespace(podSchedulingContext.Namespace) b.WithKind("PodSchedulingContext") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b, nil } @@ -256,3 +256,9 @@ func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodScheduling b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodSchedulingContextApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go similarity index 87% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go index c95d3295e8..fd25df7a53 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 -// PodSchedulingContextSpecApplyConfiguration represents an declarative configuration of the PodSchedulingContextSpec type for use +// PodSchedulingContextSpecApplyConfiguration represents a declarative configuration of the PodSchedulingContextSpec type for use // with apply. type PodSchedulingContextSpecApplyConfiguration struct { SelectedNode *string `json:"selectedNode,omitempty"` PotentialNodes []string `json:"potentialNodes,omitempty"` } -// PodSchedulingContextSpecApplyConfiguration constructs an declarative configuration of the PodSchedulingContextSpec type for use with +// PodSchedulingContextSpecApplyConfiguration constructs a declarative configuration of the PodSchedulingContextSpec type for use with // apply. 
func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration { return &PodSchedulingContextSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go similarity index 84% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go index a8b10b9a0e..a06e370cc3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go @@ -16,15 +16,15 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 -// PodSchedulingContextStatusApplyConfiguration represents an declarative configuration of the PodSchedulingContextStatus type for use +// PodSchedulingContextStatusApplyConfiguration represents a declarative configuration of the PodSchedulingContextStatus type for use // with apply. type PodSchedulingContextStatusApplyConfiguration struct { ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"` } -// PodSchedulingContextStatusApplyConfiguration constructs an declarative configuration of the PodSchedulingContextStatus type for use with +// PodSchedulingContextStatusApplyConfiguration constructs a declarative configuration of the PodSchedulingContextStatus type for use with // apply. func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration { return &PodSchedulingContextStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go similarity index 93% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go index 6c219f837b..6161595588 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceClaimApplyConfiguration represents an declarative configuration of the ResourceClaim type for use +// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use // with apply. type ResourceClaimApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,14 +36,14 @@ type ResourceClaimApplyConfiguration struct { Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"` } -// ResourceClaim constructs an declarative configuration of the ResourceClaim type for use with +// ResourceClaim constructs a declarative configuration of the ResourceClaim type for use with // apply. 
func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { b := &ResourceClaimApplyConfiguration{} b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b } @@ -58,20 +58,20 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "") } // ExtractResourceClaimStatus is the same as ExtractResourceClaim except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "status") } -func extractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { +func extractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { b := &ResourceClaimApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaim"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceClaim"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func extractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldMa b.WithNamespace(resourceClaim.Namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b, nil } @@ -256,3 +256,9 @@ func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusA b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go similarity index 94% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go index 41bb9e9a14..96196d7c95 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go @@ -16,13 +16,13 @@ limitations under the License. 
// Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( types "k8s.io/apimachinery/pkg/types" ) -// ResourceClaimConsumerReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimConsumerReference type for use +// ResourceClaimConsumerReferenceApplyConfiguration represents a declarative configuration of the ResourceClaimConsumerReference type for use // with apply. type ResourceClaimConsumerReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -31,7 +31,7 @@ type ResourceClaimConsumerReferenceApplyConfiguration struct { UID *types.UID `json:"uid,omitempty"` } -// ResourceClaimConsumerReferenceApplyConfiguration constructs an declarative configuration of the ResourceClaimConsumerReference type for use with +// ResourceClaimConsumerReferenceApplyConfiguration constructs a declarative configuration of the ResourceClaimConsumerReference type for use with // apply. func ResourceClaimConsumerReference() *ResourceClaimConsumerReferenceApplyConfiguration { return &ResourceClaimConsumerReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go similarity index 86% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go index e74679aed3..caab89acdb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 -// ResourceClaimSchedulingStatusApplyConfiguration represents an declarative configuration of the ResourceClaimSchedulingStatus type for use +// ResourceClaimSchedulingStatusApplyConfiguration represents a declarative configuration of the ResourceClaimSchedulingStatus type for use // with apply. type ResourceClaimSchedulingStatusApplyConfiguration struct { Name *string `json:"name,omitempty"` UnsuitableNodes []string `json:"unsuitableNodes,omitempty"` } -// ResourceClaimSchedulingStatusApplyConfiguration constructs an declarative configuration of the ResourceClaimSchedulingStatus type for use with +// ResourceClaimSchedulingStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimSchedulingStatus type for use with // apply. func ResourceClaimSchedulingStatus() *ResourceClaimSchedulingStatusApplyConfiguration { return &ResourceClaimSchedulingStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go new file mode 100644 index 0000000000..7c5b65681d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use +// with apply. +type ResourceClaimSpecApplyConfiguration struct { + Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"` + Controller *string `json:"controller,omitempty"` +} + +// ResourceClaimSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimSpec type for use with +// apply. +func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration { + return &ResourceClaimSpecApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. +func (b *ResourceClaimSpecApplyConfiguration) WithDevices(value *DeviceClaimApplyConfiguration) *ResourceClaimSpecApplyConfiguration { + b.Devices = value + return b +} + +// WithController sets the Controller field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Controller field is set to the value of the last call. +func (b *ResourceClaimSpecApplyConfiguration) WithController(value string) *ResourceClaimSpecApplyConfiguration { + b.Controller = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go similarity index 77% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go index c6fa610906..a52af3ec36 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go @@ -16,31 +16,22 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 -// ResourceClaimStatusApplyConfiguration represents an declarative configuration of the ResourceClaimStatus type for use +// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use // with apply. 
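For context, a minimal sketch of how the regenerated v1alpha3 builders above might fit together: `ResourceClaim(name, namespace)` now stamps `resource.k8s.io/v1alpha3`, and the new `ResourceClaimSpec` builder carries the `Controller` field. The controller name is illustrative, and `WithSpec` on `ResourceClaimApplyConfiguration` is the standard generated setter for the `Spec` field, assumed here because it is not visible in this excerpt.

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func main() {
	// Build a claim spec in the new v1alpha3 shape; WithController is the
	// generated setter shown above. The controller name is illustrative.
	spec := resourcev1alpha3.ResourceClaimSpec().
		WithController("dra.example.com")

	// ResourceClaim(name, namespace) sets kind and apiVersion resource.k8s.io/v1alpha3.
	// WithSpec is assumed (standard generated setter, not shown in this excerpt).
	claim := resourcev1alpha3.ResourceClaim("gpu-claim", "default").
		WithSpec(spec)

	// The newly added GetName accessor reads the name back from ObjectMeta.
	fmt.Println(*claim.GetName()) // "gpu-claim"
}
```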
type ResourceClaimStatusApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` DeallocationRequested *bool `json:"deallocationRequested,omitempty"` } -// ResourceClaimStatusApplyConfiguration constructs an declarative configuration of the ResourceClaimStatus type for use with +// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with // apply. func ResourceClaimStatus() *ResourceClaimStatusApplyConfiguration { return &ResourceClaimStatusApplyConfiguration{} } -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *ResourceClaimStatusApplyConfiguration) WithDriverName(value string) *ResourceClaimStatusApplyConfiguration { - b.DriverName = &value - return b -} - // WithAllocation sets the Allocation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Allocation field is set to the value of the last call. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go similarity index 93% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go index fc2209b8f0..6f371d0c05 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceClaimTemplateApplyConfiguration represents an declarative configuration of the ResourceClaimTemplate type for use +// ResourceClaimTemplateApplyConfiguration represents a declarative configuration of the ResourceClaimTemplate type for use // with apply. type ResourceClaimTemplateApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,14 +35,14 @@ type ResourceClaimTemplateApplyConfiguration struct { Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"` } -// ResourceClaimTemplate constructs an declarative configuration of the ResourceClaimTemplate type for use with +// ResourceClaimTemplate constructs a declarative configuration of the ResourceClaimTemplate type for use with // apply. 
func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyConfiguration { b := &ResourceClaimTemplateApplyConfiguration{} b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b } @@ -57,20 +57,20 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "") } // ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status") } -func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { +func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { b := &ResourceClaimTemplateApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimTemplate"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceClaimTemplate"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -78,7 +78,7 @@ func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.Resour b.WithNamespace(resourceClaimTemplate.Namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b, nil } @@ -247,3 +247,9 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimT b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ResourceClaimTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go similarity index 94% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go index 2f38ea0366..5b03ab7553 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,14 +24,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceClaimTemplateSpecApplyConfiguration represents an declarative configuration of the ResourceClaimTemplateSpec type for use +// ResourceClaimTemplateSpecApplyConfiguration represents a declarative configuration of the ResourceClaimTemplateSpec type for use // with apply. type ResourceClaimTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` } -// ResourceClaimTemplateSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimTemplateSpec type for use with +// ResourceClaimTemplateSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimTemplateSpec type for use with // apply. func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration { return &ResourceClaimTemplateSpecApplyConfiguration{} @@ -186,3 +186,9 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceCl b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go new file mode 100644 index 0000000000..23825d137f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// ResourcePoolApplyConfiguration represents a declarative configuration of the ResourcePool type for use +// with apply. 
+type ResourcePoolApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Generation *int64 `json:"generation,omitempty"` + ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"` +} + +// ResourcePoolApplyConfiguration constructs a declarative configuration of the ResourcePool type for use with +// apply. +func ResourcePool() *ResourcePoolApplyConfiguration { + return &ResourcePoolApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithName(value string) *ResourcePoolApplyConfiguration { + b.Name = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithGeneration(value int64) *ResourcePoolApplyConfiguration { + b.Generation = &value + return b +} + +// WithResourceSliceCount sets the ResourceSliceCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceSliceCount field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithResourceSliceCount(value int64) *ResourcePoolApplyConfiguration { + b.ResourceSliceCount = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceslice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go similarity index 84% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceslice.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go index ff737ce672..aaad68612e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,23 +27,21 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceSliceApplyConfiguration represents an declarative configuration of the ResourceSlice type for use +// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use // with apply. 
type ResourceSliceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - NodeName *string `json:"nodeName,omitempty"` - DriverName *string `json:"driverName,omitempty"` - ResourceModelApplyConfiguration `json:",inline"` + Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"` } -// ResourceSlice constructs an declarative configuration of the ResourceSlice type for use with +// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with // apply. func ResourceSlice(name string) *ResourceSliceApplyConfiguration { b := &ResourceSliceApplyConfiguration{} b.WithName(name) b.WithKind("ResourceSlice") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b } @@ -58,27 +56,27 @@ func ResourceSlice(name string) *ResourceSliceApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceSlice(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { +func ExtractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { return extractResourceSlice(resourceSlice, fieldManager, "") } // ExtractResourceSliceStatus is the same as ExtractResourceSlice except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceSliceStatus(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { +func ExtractResourceSliceStatus(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { return extractResourceSlice(resourceSlice, fieldManager, "status") } -func extractResourceSlice(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) { +func extractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) { b := &ResourceSliceApplyConfiguration{} - err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceSlice"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceSlice"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(resourceSlice.Name) b.WithKind("ResourceSlice") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b, nil } @@ -240,26 +238,16 @@ func (b *ResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExis } } -// WithNodeName sets the NodeName field in the declarative configuration to the given value +// WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodeName field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithNodeName(value string) *ResourceSliceApplyConfiguration { - b.NodeName = &value +// If called multiple times, the Spec field is set to the value of the last call. 
+func (b *ResourceSliceApplyConfiguration) WithSpec(value *ResourceSliceSpecApplyConfiguration) *ResourceSliceApplyConfiguration { + b.Spec = value return b } -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithDriverName(value string) *ResourceSliceApplyConfiguration { - b.DriverName = &value - return b -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithNamedResources(value *NamedResourcesResourcesApplyConfiguration) *ResourceSliceApplyConfiguration { - b.NamedResources = value - return b +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go new file mode 100644 index 0000000000..2ded759073 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// ResourceSliceSpecApplyConfiguration represents a declarative configuration of the ResourceSliceSpec type for use +// with apply. +type ResourceSliceSpecApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"` + NodeName *string `json:"nodeName,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + AllNodes *bool `json:"allNodes,omitempty"` + Devices []DeviceApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceSliceSpecApplyConfiguration constructs a declarative configuration of the ResourceSliceSpec type for use with +// apply. +func ResourceSliceSpec() *ResourceSliceSpecApplyConfiguration { + return &ResourceSliceSpecApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. 
+func (b *ResourceSliceSpecApplyConfiguration) WithDriver(value string) *ResourceSliceSpecApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithPool(value *ResourcePoolApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.Pool = value + return b +} + +// WithNodeName sets the NodeName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeName field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeName(value string) *ResourceSliceSpecApplyConfiguration { + b.NodeName = &value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithAllNodes sets the AllNodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllNodes field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithAllNodes(value bool) *ResourceSliceSpecApplyConfiguration { + b.AllNodes = &value + return b +} + +// WithDevices adds the given value to the Devices field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Devices field. +func (b *ResourceSliceSpecApplyConfiguration) WithDevices(values ...*DeviceApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDevices") + } + b.Devices = append(b.Devices, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go index b57e8ba57d..f2f135abc6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. 
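The `ResourceSlice` apply configuration above loses its inline `NodeName`/`DriverName`/`ResourceModel` fields in favor of a `Spec`, carried by the new `ResourceSliceSpec` and `ResourcePool` builders. A minimal sketch of chaining the regenerated setters; the driver, pool, and node names are illustrative only.

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func main() {
	// Pool metadata published by a DRA driver; all three setters are shown above.
	pool := resourcev1alpha3.ResourcePool().
		WithName("worker-1-pool").
		WithGeneration(1).
		WithResourceSliceCount(1)

	// The spec now owns driver, pool, and node scoping instead of the old
	// top-level NodeName/DriverName fields.
	spec := resourcev1alpha3.ResourceSliceSpec().
		WithDriver("gpu.example.com").
		WithNodeName("worker-1").
		WithPool(pool)

	// ResourceSlice(name) stamps kind and apiVersion resource.k8s.io/v1alpha3;
	// WithSpec replaces the removed WithNodeName/WithDriverName/WithNamedResources.
	slice := resourcev1alpha3.ResourceSlice("worker-1-gpu.example.com").
		WithSpec(spec)

	fmt.Println(*slice.GetName()) // "worker-1-gpu.example.com"
}
```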
type PriorityClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct { PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go index 0cd09d5d1c..098517675e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. type PriorityClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct { PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go index 98cfb14c70..075862fe3e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. 
type PriorityClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct { PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go index aeead0861c..39d8357029 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIDriverApplyConfiguration represents an declarative configuration of the CSIDriver type for use +// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use // with apply. type CSIDriverApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSIDriverApplyConfiguration struct { Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"` } -// CSIDriver constructs an declarative configuration of the CSIDriver type for use with +// CSIDriver constructs a declarative configuration of the CSIDriver type for use with // apply. func CSIDriver(name string) *CSIDriverApplyConfiguration { b := &CSIDriverApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIDriverApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go index a1ef00656b..b2dcb0feea 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/storage/v1" ) -// CSIDriverSpecApplyConfiguration represents an declarative configuration of the CSIDriverSpec type for use +// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use // with apply. type CSIDriverSpecApplyConfiguration struct { AttachRequired *bool `json:"attachRequired,omitempty"` @@ -35,7 +35,7 @@ type CSIDriverSpecApplyConfiguration struct { SELinuxMount *bool `json:"seLinuxMount,omitempty"` } -// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with +// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with // apply. 
func CSIDriverSpec() *CSIDriverSpecApplyConfiguration { return &CSIDriverSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go index d8296e4856..8a53e7984e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSINodeApplyConfiguration represents an declarative configuration of the CSINode type for use +// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use // with apply. type CSINodeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSINodeApplyConfiguration struct { Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"` } -// CSINode constructs an declarative configuration of the CSINode type for use with +// CSINode constructs a declarative configuration of the CSINode type for use with // apply. func CSINode(name string) *CSINodeApplyConfiguration { b := &CSINodeApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSINodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go index 6219ef1151..8c69e435e7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSINodeDriverApplyConfiguration represents an declarative configuration of the CSINodeDriver type for use +// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use // with apply. type CSINodeDriverApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -27,7 +27,7 @@ type CSINodeDriverApplyConfiguration struct { Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"` } -// CSINodeDriverApplyConfiguration constructs an declarative configuration of the CSINodeDriver type for use with +// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with // apply. func CSINodeDriver() *CSINodeDriverApplyConfiguration { return &CSINodeDriverApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go index 30d1d4546b..21d3ba7ccc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// CSINodeSpecApplyConfiguration represents an declarative configuration of the CSINodeSpec type for use +// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use // with apply. 
type CSINodeSpecApplyConfiguration struct { Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"` } -// CSINodeSpecApplyConfiguration constructs an declarative configuration of the CSINodeSpec type for use with +// CSINodeSpecApplyConfiguration constructs a declarative configuration of the CSINodeSpec type for use with // apply. func CSINodeSpec() *CSINodeSpecApplyConfiguration { return &CSINodeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go index c47c6b8215..0e293248d9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct { MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with // apply. func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { b := &CSIStorageCapacityApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou b.MaximumVolumeSize = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go index 98c4c22336..26d70bc8b0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go @@ -29,7 +29,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageClassApplyConfiguration represents an declarative configuration of the StorageClass type for use +// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use // with apply. type StorageClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -43,7 +43,7 @@ type StorageClassApplyConfiguration struct { AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` } -// StorageClass constructs an declarative configuration of the StorageClass type for use with +// StorageClass constructs a declarative configuration of the StorageClass type for use with // apply. func StorageClass(name string) *StorageClassApplyConfiguration { b := &StorageClassApplyConfiguration{} @@ -314,3 +314,9 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *StorageClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go index 6665a1ff2e..77b96db2f0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// TokenRequestApplyConfiguration represents an declarative configuration of the TokenRequest type for use +// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use // with apply. type TokenRequestApplyConfiguration struct { Audience *string `json:"audience,omitempty"` ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"` } -// TokenRequestApplyConfiguration constructs an declarative configuration of the TokenRequest type for use with +// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with // apply. func TokenRequest() *TokenRequestApplyConfiguration { return &TokenRequestApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go index 4c74f09aa2..72c351208c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct { Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go index 2bf3f7720d..4778553986 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. type VolumeAttachmentSourceApplyConfiguration struct { PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go index a55f7c8ea1..8965392352 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go index 015b08e6eb..14293376d0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. 
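Besides the comment grammar fixes ("an declarative" to "a declarative"), the regenerated files add a uniform `GetName` accessor to the apply configurations touched here. A minimal sketch of what that enables; the `named` interface and `printNames` helper are local illustrations, not part of client-go.

```go
package main

import (
	"fmt"

	schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1"
	storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
)

// named is satisfied by any apply configuration that now exposes GetName.
type named interface{ GetName() *string }

// printNames reads names back without reflection or per-type switches.
func printNames(objs ...named) {
	for _, o := range objs {
		if name := o.GetName(); name != nil {
			fmt.Println(*name)
		}
	}
}

func main() {
	printNames(
		storagev1.StorageClass("fast-ssd"),
		storagev1.CSIDriver("csi.example.com"),
		schedulingv1.PriorityClass("high-priority"),
	)
}
```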
type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go index 4bf829f8a9..039e5f32bf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. type VolumeErrorApplyConfiguration struct { Time *v1.Time `json:"time,omitempty"` Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go index 3c5fd3dc29..735853c48b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// VolumeNodeResourcesApplyConfiguration represents an declarative configuration of the VolumeNodeResources type for use +// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use // with apply. type VolumeNodeResourcesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` } -// VolumeNodeResourcesApplyConfiguration constructs an declarative configuration of the VolumeNodeResources type for use with +// VolumeNodeResourcesApplyConfiguration constructs a declarative configuration of the VolumeNodeResources type for use with // apply. 
func VolumeNodeResources() *VolumeNodeResourcesApplyConfiguration { return &VolumeNodeResourcesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go index 8b810fed10..aa949e28c7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct { MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with // apply. func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { b := &CSIStorageCapacityApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou b.MaximumVolumeSize = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go index bcefb5778a..9648621ac3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct { Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go index 82872cc355..be7da5dd15 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. type VolumeAttachmentSourceApplyConfiguration struct { PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go index 2710ff8864..e97487a645 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go index 43803496e8..a287fc6b28 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1alpha1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go index 9d4c476259..f95bc55477 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttributesClassApplyConfiguration represents an declarative configuration of the VolumeAttributesClass type for use +// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use // with apply. type VolumeAttributesClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttributesClassApplyConfiguration struct { Parameters map[string]string `json:"parameters,omitempty"` } -// VolumeAttributesClass constructs an declarative configuration of the VolumeAttributesClass type for use with +// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with // apply. func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration { b := &VolumeAttributesClassApplyConfiguration{} @@ -260,3 +260,9 @@ func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[str } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttributesClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go index cbff16fd0c..ef8f6bbe64 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. 
type VolumeErrorApplyConfiguration struct { Time *v1.Time `json:"time,omitempty"` Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go index 4266f0b6e4..b9a807bd8a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIDriverApplyConfiguration represents an declarative configuration of the CSIDriver type for use +// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use // with apply. type CSIDriverApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSIDriverApplyConfiguration struct { Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"` } -// CSIDriver constructs an declarative configuration of the CSIDriver type for use with +// CSIDriver constructs a declarative configuration of the CSIDriver type for use with // apply. func CSIDriver(name string) *CSIDriverApplyConfiguration { b := &CSIDriverApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIDriverApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go index 6097a615be..5f4e068f0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/storage/v1beta1" ) -// CSIDriverSpecApplyConfiguration represents an declarative configuration of the CSIDriverSpec type for use +// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use // with apply. type CSIDriverSpecApplyConfiguration struct { AttachRequired *bool `json:"attachRequired,omitempty"` @@ -35,7 +35,7 @@ type CSIDriverSpecApplyConfiguration struct { SELinuxMount *bool `json:"seLinuxMount,omitempty"` } -// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with +// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with // apply. 
func CSIDriverSpec() *CSIDriverSpecApplyConfiguration { return &CSIDriverSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go index 91588fd9fb..af0f41cf0a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSINodeApplyConfiguration represents an declarative configuration of the CSINode type for use +// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use // with apply. type CSINodeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSINodeApplyConfiguration struct { Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"` } -// CSINode constructs an declarative configuration of the CSINode type for use with +// CSINode constructs a declarative configuration of the CSINode type for use with // apply. func CSINode(name string) *CSINodeApplyConfiguration { b := &CSINodeApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSINodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go index 2c7de497b2..65ad771bb2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// CSINodeDriverApplyConfiguration represents an declarative configuration of the CSINodeDriver type for use +// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use // with apply. type CSINodeDriverApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -27,7 +27,7 @@ type CSINodeDriverApplyConfiguration struct { Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"` } -// CSINodeDriverApplyConfiguration constructs an declarative configuration of the CSINodeDriver type for use with +// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with // apply. func CSINodeDriver() *CSINodeDriverApplyConfiguration { return &CSINodeDriverApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go index 94ff1b4611..c9cbea1d9c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// CSINodeSpecApplyConfiguration represents an declarative configuration of the CSINodeSpec type for use +// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use // with apply. 
type CSINodeSpecApplyConfiguration struct { Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"` } -// CSINodeSpecApplyConfiguration constructs an declarative configuration of the CSINodeSpec type for use with +// CSINodeSpecApplyConfiguration constructs a declarative configuration of the CSINodeSpec type for use with // apply. func CSINodeSpec() *CSINodeSpecApplyConfiguration { return &CSINodeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go index 2854a15da7..19350e5a6f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct { MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with // apply. func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { b := &CSIStorageCapacityApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou b.MaximumVolumeSize = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go index 02194f1080..fa504a44ec 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go @@ -29,7 +29,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageClassApplyConfiguration represents an declarative configuration of the StorageClass type for use +// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use // with apply. type StorageClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -43,7 +43,7 @@ type StorageClassApplyConfiguration struct { AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` } -// StorageClass constructs an declarative configuration of the StorageClass type for use with +// StorageClass constructs a declarative configuration of the StorageClass type for use with // apply. 
func StorageClass(name string) *StorageClassApplyConfiguration { b := &StorageClassApplyConfiguration{} @@ -314,3 +314,9 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go index 89c99d5602..e0f2df28e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// TokenRequestApplyConfiguration represents an declarative configuration of the TokenRequest type for use +// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use // with apply. type TokenRequestApplyConfiguration struct { Audience *string `json:"audience,omitempty"` ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"` } -// TokenRequestApplyConfiguration constructs an declarative configuration of the TokenRequest type for use with +// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with // apply. func TokenRequest() *TokenRequestApplyConfiguration { return &TokenRequestApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go index 9fccaf5cf9..b0711d7314 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct { Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go index 9700b38ee2..b08dd3148b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. type VolumeAttachmentSourceApplyConfiguration struct { PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go index 1d5e304bb5..3bdaeb45d7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go index fa1855a241..f7046cdb35 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
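Editorial note: the GetName accessors added throughout these generated files all follow the same pattern, so one hypothetical sketch covers them. The interface below is an illustration-only assumption (it is not defined anywhere in this diff); it shows how calling code could read a builder's intended name generically once every apply configuration exposes GetName:

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
)

// namedApplyConfiguration is a hypothetical caller-side interface; each apply
// configuration touched by this diff satisfies it via the new GetName method.
type namedApplyConfiguration interface {
	GetName() *string
}

// nameOrDefault returns the name set on the builder, or def when WithName was
// never called and the pointer is nil.
func nameOrDefault(ac namedApplyConfiguration, def string) string {
	if n := ac.GetName(); n != nil && *n != "" {
		return *n
	}
	return def
}

func main() {
	va := storagev1beta1.VolumeAttachment("example-attachment") // constructor sets the name
	fmt.Println(nameOrDefault(va, "unnamed"))                    // prints: example-attachment
}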
package v1beta1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 0000000000..7b221d2775 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,268 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use +// with apply. +type VolumeAttributesClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + DriverName *string `json:"driverName,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` +} + +// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with +// apply. +func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration { + b := &VolumeAttributesClassApplyConfiguration{} + b.WithName(name) + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1beta1") + return b +} + +// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from +// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a +// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. 
It is possible that no managed fields were found because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// volumeAttributesClass must be an unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API. +// ExtractVolumeAttributesClass provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "") +} + +// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status") +} + +func extractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { + b := &VolumeAttributesClassApplyConfiguration{} + err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttributesClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(volumeAttributesClass.Name) + + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *VolumeAttributesClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithDriverName sets the DriverName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDriverName(value string) *VolumeAttributesClassApplyConfiguration { + b.DriverName = &value + return b +} + +// WithParameters puts the entries into the Parameters field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Parameters field, +// overwriting an existing map entries in Parameters field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + if b.Parameters == nil && len(entries) > 0 { + b.Parameters = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Parameters[k] = v + } + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttributesClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go index 3f255fce75..fec1c9ade3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. type VolumeErrorApplyConfiguration struct { Time *v1.Time `json:"time,omitempty"` Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go index 4b69b64c9b..b42c9decc0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go @@ -18,13 +18,13 @@ limitations under the License. 
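Editorial note: the file above adds a complete apply configuration for the new storage.k8s.io/v1beta1 VolumeAttributesClass. A rough usage sketch follows, assuming the matching typed client (StorageV1beta1().VolumeAttributesClasses()) exists in the vendored clientset for this release; the driver name, parameters, and field manager below are placeholders, not values taken from this diff:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// applyGoldClass builds the declarative configuration with the generated
// "With" helpers and submits it via server-side apply, so this field manager
// owns only the fields it sets here.
func applyGoldClass(ctx context.Context, cs kubernetes.Interface) error {
	vac := storagev1beta1.VolumeAttributesClass("gold").
		WithDriverName("ebs.csi.example.com"). // placeholder CSI driver
		WithParameters(map[string]string{"iops": "16000"})
	_, err := cs.StorageV1beta1().VolumeAttributesClasses().
		Apply(ctx, vac, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}

For objects that already exist on the cluster, the ExtractVolumeAttributesClass helper added above supports the complementary extract/modify-in-place/apply flow described in its comment.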
package v1beta1 -// VolumeNodeResourcesApplyConfiguration represents an declarative configuration of the VolumeNodeResources type for use +// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use // with apply. type VolumeNodeResourcesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` } -// VolumeNodeResourcesApplyConfiguration constructs an declarative configuration of the VolumeNodeResources type for use with +// VolumeNodeResourcesApplyConfiguration constructs a declarative configuration of the VolumeNodeResources type for use with // apply. func VolumeNodeResources() *VolumeNodeResourcesApplyConfiguration { return &VolumeNodeResourcesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go index c733ac5c04..c8f9f009a5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// GroupVersionResourceApplyConfiguration represents an declarative configuration of the GroupVersionResource type for use +// GroupVersionResourceApplyConfiguration represents a declarative configuration of the GroupVersionResource type for use // with apply. type GroupVersionResourceApplyConfiguration struct { Group *string `json:"group,omitempty"` @@ -26,7 +26,7 @@ type GroupVersionResourceApplyConfiguration struct { Resource *string `json:"resource,omitempty"` } -// GroupVersionResourceApplyConfiguration constructs an declarative configuration of the GroupVersionResource type for use with +// GroupVersionResourceApplyConfiguration constructs a declarative configuration of the GroupVersionResource type for use with // apply. func GroupVersionResource() *GroupVersionResourceApplyConfiguration { return &GroupVersionResourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go index d0f863446e..dcdbc60c7c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// MigrationConditionApplyConfiguration represents an declarative configuration of the MigrationCondition type for use +// MigrationConditionApplyConfiguration represents a declarative configuration of the MigrationCondition type for use // with apply. type MigrationConditionApplyConfiguration struct { Type *v1alpha1.MigrationConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type MigrationConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// MigrationConditionApplyConfiguration constructs an declarative configuration of the MigrationCondition type for use with +// MigrationConditionApplyConfiguration constructs a declarative configuration of the MigrationCondition type for use with // apply. 
func MigrationCondition() *MigrationConditionApplyConfiguration { return &MigrationConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go index cc57b2b126..7e6452a777 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageVersionMigrationApplyConfiguration represents an declarative configuration of the StorageVersionMigration type for use +// StorageVersionMigrationApplyConfiguration represents a declarative configuration of the StorageVersionMigration type for use // with apply. type StorageVersionMigrationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StorageVersionMigrationApplyConfiguration struct { Status *StorageVersionMigrationStatusApplyConfiguration `json:"status,omitempty"` } -// StorageVersionMigration constructs an declarative configuration of the StorageVersionMigration type for use with +// StorageVersionMigration constructs a declarative configuration of the StorageVersionMigration type for use with // apply. func StorageVersionMigration(name string) *StorageVersionMigrationApplyConfiguration { b := &StorageVersionMigrationApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *StorageVersionMigrationApplyConfiguration) WithStatus(value *StorageVer b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageVersionMigrationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go index 6c7c5b2645..02ddb540f8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// StorageVersionMigrationSpecApplyConfiguration represents an declarative configuration of the StorageVersionMigrationSpec type for use +// StorageVersionMigrationSpecApplyConfiguration represents a declarative configuration of the StorageVersionMigrationSpec type for use // with apply. type StorageVersionMigrationSpecApplyConfiguration struct { Resource *GroupVersionResourceApplyConfiguration `json:"resource,omitempty"` ContinueToken *string `json:"continueToken,omitempty"` } -// StorageVersionMigrationSpecApplyConfiguration constructs an declarative configuration of the StorageVersionMigrationSpec type for use with +// StorageVersionMigrationSpecApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationSpec type for use with // apply. 
func StorageVersionMigrationSpec() *StorageVersionMigrationSpecApplyConfiguration { return &StorageVersionMigrationSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go index b8d397548a..fc957cb15c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// StorageVersionMigrationStatusApplyConfiguration represents an declarative configuration of the StorageVersionMigrationStatus type for use +// StorageVersionMigrationStatusApplyConfiguration represents a declarative configuration of the StorageVersionMigrationStatus type for use // with apply. type StorageVersionMigrationStatusApplyConfiguration struct { Conditions []MigrationConditionApplyConfiguration `json:"conditions,omitempty"` ResourceVersion *string `json:"resourceVersion,omitempty"` } -// StorageVersionMigrationStatusApplyConfiguration constructs an declarative configuration of the StorageVersionMigrationStatus type for use with +// StorageVersionMigrationStatusApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationStatus type for use with // apply. func StorageVersionMigrationStatus() *StorageVersionMigrationStatusApplyConfiguration { return &StorageVersionMigrationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/utils.go b/vendor/k8s.io/client-go/applyconfigurations/utils.go new file mode 100644 index 0000000000..0955b8f44f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/utils.go @@ -0,0 +1,1740 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package applyconfigurations + +import ( + v1 "k8s.io/api/admissionregistration/v1" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + v1beta2 "k8s.io/api/apps/v1beta2" + autoscalingv1 "k8s.io/api/autoscaling/v1" + v2 "k8s.io/api/autoscaling/v2" + v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + coordinationv1 "k8s.io/api/coordination/v1" + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + coordinationv1beta1 "k8s.io/api/coordination/v1beta1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + eventsv1 "k8s.io/api/events/v1" + eventsv1beta1 "k8s.io/api/events/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" + networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1 "k8s.io/api/node/v1" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + nodev1beta1 "k8s.io/api/node/v1beta1" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + v1alpha3 "k8s.io/api/resource/v1alpha3" + schedulingv1 "k8s.io/api/scheduling/v1" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + storagev1 "k8s.io/api/storage/v1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1beta1 "k8s.io/api/storage/v1beta1" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + applyconfigurationsapiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" + applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" + autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" + autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" + applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + applyconfigurationsbatchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" + applyconfigurationscertificatesv1 
"k8s.io/client-go/applyconfigurations/certificates/v1" + applyconfigurationscertificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + applyconfigurationscertificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" + applyconfigurationscoordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" + applyconfigurationscoordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1" + applyconfigurationscoordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationsdiscoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" + applyconfigurationsdiscoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" + applyconfigurationseventsv1 "k8s.io/client-go/applyconfigurations/events/v1" + applyconfigurationseventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsflowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + applyconfigurationsflowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + applyconfigurationsflowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + applyconfigurationsimagepolicyv1alpha1 "k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1" + internal "k8s.io/client-go/applyconfigurations/internal" + applyconfigurationsmetav1 "k8s.io/client-go/applyconfigurations/meta/v1" + applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + applyconfigurationsnetworkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + applyconfigurationsnodev1 "k8s.io/client-go/applyconfigurations/node/v1" + applyconfigurationsnodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" + applyconfigurationsnodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" + applyconfigurationspolicyv1 "k8s.io/client-go/applyconfigurations/policy/v1" + applyconfigurationspolicyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" + applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + applyconfigurationsschedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" + applyconfigurationsschedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" + applyconfigurationsschedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no +// apply configuration type exists for the given 
GroupVersionKind. +func ForKind(kind schema.GroupVersionKind) interface{} { + switch kind { + // Group=admissionregistration.k8s.io, Version=v1 + case v1.SchemeGroupVersion.WithKind("AuditAnnotation"): + return &admissionregistrationv1.AuditAnnotationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ExpressionWarning"): + return &admissionregistrationv1.ExpressionWarningApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MatchCondition"): + return &admissionregistrationv1.MatchConditionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MatchResources"): + return &admissionregistrationv1.MatchResourcesApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MutatingWebhook"): + return &admissionregistrationv1.MutatingWebhookApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration"): + return &admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NamedRuleWithOperations"): + return &admissionregistrationv1.NamedRuleWithOperationsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ParamKind"): + return &admissionregistrationv1.ParamKindApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ParamRef"): + return &admissionregistrationv1.ParamRefApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Rule"): + return &admissionregistrationv1.RuleApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RuleWithOperations"): + return &admissionregistrationv1.RuleWithOperationsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ServiceReference"): + return &admissionregistrationv1.ServiceReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TypeChecking"): + return &admissionregistrationv1.TypeCheckingApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"): + return &admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"): + return &admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBindingSpec"): + return &admissionregistrationv1.ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicySpec"): + return &admissionregistrationv1.ValidatingAdmissionPolicySpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyStatus"): + return &admissionregistrationv1.ValidatingAdmissionPolicyStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingWebhook"): + return &admissionregistrationv1.ValidatingWebhookApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration"): + return &admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Validation"): + return &admissionregistrationv1.ValidationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Variable"): + return &admissionregistrationv1.VariableApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("WebhookClientConfig"): + return &admissionregistrationv1.WebhookClientConfigApplyConfiguration{} + + // Group=admissionregistration.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithKind("AuditAnnotation"): + return &admissionregistrationv1alpha1.AuditAnnotationApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ExpressionWarning"): + return 
&admissionregistrationv1alpha1.ExpressionWarningApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MatchCondition"): + return &admissionregistrationv1alpha1.MatchConditionApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MatchResources"): + return &admissionregistrationv1alpha1.MatchResourcesApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NamedRuleWithOperations"): + return &admissionregistrationv1alpha1.NamedRuleWithOperationsApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ParamKind"): + return &admissionregistrationv1alpha1.ParamKindApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ParamRef"): + return &admissionregistrationv1alpha1.ParamRefApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("TypeChecking"): + return &admissionregistrationv1alpha1.TypeCheckingApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"): + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"): + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBindingSpec"): + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicySpec"): + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicySpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyStatus"): + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyStatusApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Validation"): + return &admissionregistrationv1alpha1.ValidationApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Variable"): + return &admissionregistrationv1alpha1.VariableApplyConfiguration{} + + // Group=admissionregistration.k8s.io, Version=v1beta1 + case v1beta1.SchemeGroupVersion.WithKind("AuditAnnotation"): + return &admissionregistrationv1beta1.AuditAnnotationApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ExpressionWarning"): + return &admissionregistrationv1beta1.ExpressionWarningApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("MatchCondition"): + return &admissionregistrationv1beta1.MatchConditionApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("MatchResources"): + return &admissionregistrationv1beta1.MatchResourcesApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("MutatingWebhook"): + return &admissionregistrationv1beta1.MutatingWebhookApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration"): + return &admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("NamedRuleWithOperations"): + return &admissionregistrationv1beta1.NamedRuleWithOperationsApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ParamKind"): + return &admissionregistrationv1beta1.ParamKindApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ParamRef"): + return &admissionregistrationv1beta1.ParamRefApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ServiceReference"): + return &admissionregistrationv1beta1.ServiceReferenceApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("TypeChecking"): + return 
&admissionregistrationv1beta1.TypeCheckingApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"): + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"): + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBindingSpec"): + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicySpec"): + return &admissionregistrationv1beta1.ValidatingAdmissionPolicySpecApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyStatus"): + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyStatusApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingWebhook"): + return &admissionregistrationv1beta1.ValidatingWebhookApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration"): + return &admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("Validation"): + return &admissionregistrationv1beta1.ValidationApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("Variable"): + return &admissionregistrationv1beta1.VariableApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("WebhookClientConfig"): + return &admissionregistrationv1beta1.WebhookClientConfigApplyConfiguration{} + + // Group=apps, Version=v1 + case appsv1.SchemeGroupVersion.WithKind("ControllerRevision"): + return &applyconfigurationsappsv1.ControllerRevisionApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DaemonSet"): + return &applyconfigurationsappsv1.DaemonSetApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DaemonSetCondition"): + return &applyconfigurationsappsv1.DaemonSetConditionApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DaemonSetSpec"): + return &applyconfigurationsappsv1.DaemonSetSpecApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DaemonSetStatus"): + return &applyconfigurationsappsv1.DaemonSetStatusApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DaemonSetUpdateStrategy"): + return &applyconfigurationsappsv1.DaemonSetUpdateStrategyApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("Deployment"): + return &applyconfigurationsappsv1.DeploymentApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DeploymentCondition"): + return &applyconfigurationsappsv1.DeploymentConditionApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DeploymentSpec"): + return &applyconfigurationsappsv1.DeploymentSpecApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DeploymentStatus"): + return &applyconfigurationsappsv1.DeploymentStatusApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DeploymentStrategy"): + return &applyconfigurationsappsv1.DeploymentStrategyApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("ReplicaSet"): + return &applyconfigurationsappsv1.ReplicaSetApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("ReplicaSetCondition"): + return &applyconfigurationsappsv1.ReplicaSetConditionApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("ReplicaSetSpec"): + return &applyconfigurationsappsv1.ReplicaSetSpecApplyConfiguration{} + case 
appsv1.SchemeGroupVersion.WithKind("ReplicaSetStatus"): + return &applyconfigurationsappsv1.ReplicaSetStatusApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("RollingUpdateDaemonSet"): + return &applyconfigurationsappsv1.RollingUpdateDaemonSetApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("RollingUpdateDeployment"): + return &applyconfigurationsappsv1.RollingUpdateDeploymentApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("RollingUpdateStatefulSetStrategy"): + return &applyconfigurationsappsv1.RollingUpdateStatefulSetStrategyApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSet"): + return &applyconfigurationsappsv1.StatefulSetApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetCondition"): + return &applyconfigurationsappsv1.StatefulSetConditionApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetOrdinals"): + return &applyconfigurationsappsv1.StatefulSetOrdinalsApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetPersistentVolumeClaimRetentionPolicy"): + return &applyconfigurationsappsv1.StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetSpec"): + return &applyconfigurationsappsv1.StatefulSetSpecApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetStatus"): + return &applyconfigurationsappsv1.StatefulSetStatusApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetUpdateStrategy"): + return &applyconfigurationsappsv1.StatefulSetUpdateStrategyApplyConfiguration{} + + // Group=apps, Version=v1beta1 + case appsv1beta1.SchemeGroupVersion.WithKind("ControllerRevision"): + return &applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("Deployment"): + return &applyconfigurationsappsv1beta1.DeploymentApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentCondition"): + return &applyconfigurationsappsv1beta1.DeploymentConditionApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentSpec"): + return &applyconfigurationsappsv1beta1.DeploymentSpecApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentStatus"): + return &applyconfigurationsappsv1beta1.DeploymentStatusApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentStrategy"): + return &applyconfigurationsappsv1beta1.DeploymentStrategyApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("RollbackConfig"): + return &applyconfigurationsappsv1beta1.RollbackConfigApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateDeployment"): + return &applyconfigurationsappsv1beta1.RollingUpdateDeploymentApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateStatefulSetStrategy"): + return &applyconfigurationsappsv1beta1.RollingUpdateStatefulSetStrategyApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSet"): + return &applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetCondition"): + return &applyconfigurationsappsv1beta1.StatefulSetConditionApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetOrdinals"): + return &applyconfigurationsappsv1beta1.StatefulSetOrdinalsApplyConfiguration{} + case 
appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetPersistentVolumeClaimRetentionPolicy"): + return &applyconfigurationsappsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetSpec"): + return &applyconfigurationsappsv1beta1.StatefulSetSpecApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetStatus"): + return &applyconfigurationsappsv1beta1.StatefulSetStatusApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetUpdateStrategy"): + return &applyconfigurationsappsv1beta1.StatefulSetUpdateStrategyApplyConfiguration{} + + // Group=apps, Version=v1beta2 + case v1beta2.SchemeGroupVersion.WithKind("ControllerRevision"): + return &appsv1beta2.ControllerRevisionApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DaemonSet"): + return &appsv1beta2.DaemonSetApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DaemonSetCondition"): + return &appsv1beta2.DaemonSetConditionApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DaemonSetSpec"): + return &appsv1beta2.DaemonSetSpecApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DaemonSetStatus"): + return &appsv1beta2.DaemonSetStatusApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DaemonSetUpdateStrategy"): + return &appsv1beta2.DaemonSetUpdateStrategyApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("Deployment"): + return &appsv1beta2.DeploymentApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DeploymentCondition"): + return &appsv1beta2.DeploymentConditionApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DeploymentSpec"): + return &appsv1beta2.DeploymentSpecApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DeploymentStatus"): + return &appsv1beta2.DeploymentStatusApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DeploymentStrategy"): + return &appsv1beta2.DeploymentStrategyApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("ReplicaSet"): + return &appsv1beta2.ReplicaSetApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("ReplicaSetCondition"): + return &appsv1beta2.ReplicaSetConditionApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("ReplicaSetSpec"): + return &appsv1beta2.ReplicaSetSpecApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("ReplicaSetStatus"): + return &appsv1beta2.ReplicaSetStatusApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("RollingUpdateDaemonSet"): + return &appsv1beta2.RollingUpdateDaemonSetApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("RollingUpdateDeployment"): + return &appsv1beta2.RollingUpdateDeploymentApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("RollingUpdateStatefulSetStrategy"): + return &appsv1beta2.RollingUpdateStatefulSetStrategyApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("Scale"): + return &appsv1beta2.ScaleApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSet"): + return &appsv1beta2.StatefulSetApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetCondition"): + return &appsv1beta2.StatefulSetConditionApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetOrdinals"): + return &appsv1beta2.StatefulSetOrdinalsApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetPersistentVolumeClaimRetentionPolicy"): + return 
&appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetSpec"): + return &appsv1beta2.StatefulSetSpecApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetStatus"): + return &appsv1beta2.StatefulSetStatusApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetUpdateStrategy"): + return &appsv1beta2.StatefulSetUpdateStrategyApplyConfiguration{} + + // Group=autoscaling, Version=v1 + case autoscalingv1.SchemeGroupVersion.WithKind("CrossVersionObjectReference"): + return &applyconfigurationsautoscalingv1.CrossVersionObjectReferenceApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): + return &applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"): + return &applyconfigurationsautoscalingv1.HorizontalPodAutoscalerSpecApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"): + return &applyconfigurationsautoscalingv1.HorizontalPodAutoscalerStatusApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("Scale"): + return &applyconfigurationsautoscalingv1.ScaleApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("ScaleSpec"): + return &applyconfigurationsautoscalingv1.ScaleSpecApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("ScaleStatus"): + return &applyconfigurationsautoscalingv1.ScaleStatusApplyConfiguration{} + + // Group=autoscaling, Version=v2 + case v2.SchemeGroupVersion.WithKind("ContainerResourceMetricSource"): + return &autoscalingv2.ContainerResourceMetricSourceApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ContainerResourceMetricStatus"): + return &autoscalingv2.ContainerResourceMetricStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("CrossVersionObjectReference"): + return &autoscalingv2.CrossVersionObjectReferenceApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ExternalMetricSource"): + return &autoscalingv2.ExternalMetricSourceApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ExternalMetricStatus"): + return &autoscalingv2.ExternalMetricStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): + return &autoscalingv2.HorizontalPodAutoscalerApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerBehavior"): + return &autoscalingv2.HorizontalPodAutoscalerBehaviorApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerCondition"): + return &autoscalingv2.HorizontalPodAutoscalerConditionApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"): + return &autoscalingv2.HorizontalPodAutoscalerSpecApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"): + return &autoscalingv2.HorizontalPodAutoscalerStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HPAScalingPolicy"): + return &autoscalingv2.HPAScalingPolicyApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HPAScalingRules"): + return &autoscalingv2.HPAScalingRulesApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("MetricIdentifier"): + return &autoscalingv2.MetricIdentifierApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("MetricSpec"): + return &autoscalingv2.MetricSpecApplyConfiguration{} + case 
v2.SchemeGroupVersion.WithKind("MetricStatus"): + return &autoscalingv2.MetricStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("MetricTarget"): + return &autoscalingv2.MetricTargetApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("MetricValueStatus"): + return &autoscalingv2.MetricValueStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ObjectMetricSource"): + return &autoscalingv2.ObjectMetricSourceApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ObjectMetricStatus"): + return &autoscalingv2.ObjectMetricStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("PodsMetricSource"): + return &autoscalingv2.PodsMetricSourceApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("PodsMetricStatus"): + return &autoscalingv2.PodsMetricStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ResourceMetricSource"): + return &autoscalingv2.ResourceMetricSourceApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ResourceMetricStatus"): + return &autoscalingv2.ResourceMetricStatusApplyConfiguration{} + + // Group=autoscaling, Version=v2beta1 + case v2beta1.SchemeGroupVersion.WithKind("ContainerResourceMetricSource"): + return &autoscalingv2beta1.ContainerResourceMetricSourceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ContainerResourceMetricStatus"): + return &autoscalingv2beta1.ContainerResourceMetricStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("CrossVersionObjectReference"): + return &autoscalingv2beta1.CrossVersionObjectReferenceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ExternalMetricSource"): + return &autoscalingv2beta1.ExternalMetricSourceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ExternalMetricStatus"): + return &autoscalingv2beta1.ExternalMetricStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): + return &autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerCondition"): + return &autoscalingv2beta1.HorizontalPodAutoscalerConditionApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"): + return &autoscalingv2beta1.HorizontalPodAutoscalerSpecApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"): + return &autoscalingv2beta1.HorizontalPodAutoscalerStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("MetricSpec"): + return &autoscalingv2beta1.MetricSpecApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("MetricStatus"): + return &autoscalingv2beta1.MetricStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ObjectMetricSource"): + return &autoscalingv2beta1.ObjectMetricSourceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ObjectMetricStatus"): + return &autoscalingv2beta1.ObjectMetricStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("PodsMetricSource"): + return &autoscalingv2beta1.PodsMetricSourceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("PodsMetricStatus"): + return &autoscalingv2beta1.PodsMetricStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ResourceMetricSource"): + return &autoscalingv2beta1.ResourceMetricSourceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ResourceMetricStatus"): + return &autoscalingv2beta1.ResourceMetricStatusApplyConfiguration{} + + // Group=autoscaling, 
Version=v2beta2 + case v2beta2.SchemeGroupVersion.WithKind("ContainerResourceMetricSource"): + return &autoscalingv2beta2.ContainerResourceMetricSourceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ContainerResourceMetricStatus"): + return &autoscalingv2beta2.ContainerResourceMetricStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("CrossVersionObjectReference"): + return &autoscalingv2beta2.CrossVersionObjectReferenceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ExternalMetricSource"): + return &autoscalingv2beta2.ExternalMetricSourceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ExternalMetricStatus"): + return &autoscalingv2beta2.ExternalMetricStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): + return &autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerBehavior"): + return &autoscalingv2beta2.HorizontalPodAutoscalerBehaviorApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerCondition"): + return &autoscalingv2beta2.HorizontalPodAutoscalerConditionApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"): + return &autoscalingv2beta2.HorizontalPodAutoscalerSpecApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"): + return &autoscalingv2beta2.HorizontalPodAutoscalerStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HPAScalingPolicy"): + return &autoscalingv2beta2.HPAScalingPolicyApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HPAScalingRules"): + return &autoscalingv2beta2.HPAScalingRulesApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("MetricIdentifier"): + return &autoscalingv2beta2.MetricIdentifierApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("MetricSpec"): + return &autoscalingv2beta2.MetricSpecApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("MetricStatus"): + return &autoscalingv2beta2.MetricStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("MetricTarget"): + return &autoscalingv2beta2.MetricTargetApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("MetricValueStatus"): + return &autoscalingv2beta2.MetricValueStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ObjectMetricSource"): + return &autoscalingv2beta2.ObjectMetricSourceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ObjectMetricStatus"): + return &autoscalingv2beta2.ObjectMetricStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("PodsMetricSource"): + return &autoscalingv2beta2.PodsMetricSourceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("PodsMetricStatus"): + return &autoscalingv2beta2.PodsMetricStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ResourceMetricSource"): + return &autoscalingv2beta2.ResourceMetricSourceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ResourceMetricStatus"): + return &autoscalingv2beta2.ResourceMetricStatusApplyConfiguration{} + + // Group=batch, Version=v1 + case batchv1.SchemeGroupVersion.WithKind("CronJob"): + return &applyconfigurationsbatchv1.CronJobApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("CronJobSpec"): + return &applyconfigurationsbatchv1.CronJobSpecApplyConfiguration{} + case 
batchv1.SchemeGroupVersion.WithKind("CronJobStatus"): + return &applyconfigurationsbatchv1.CronJobStatusApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("Job"): + return &applyconfigurationsbatchv1.JobApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("JobCondition"): + return &applyconfigurationsbatchv1.JobConditionApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("JobSpec"): + return &applyconfigurationsbatchv1.JobSpecApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("JobStatus"): + return &applyconfigurationsbatchv1.JobStatusApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("JobTemplateSpec"): + return &applyconfigurationsbatchv1.JobTemplateSpecApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicy"): + return &applyconfigurationsbatchv1.PodFailurePolicyApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicyOnExitCodesRequirement"): + return &applyconfigurationsbatchv1.PodFailurePolicyOnExitCodesRequirementApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicyOnPodConditionsPattern"): + return &applyconfigurationsbatchv1.PodFailurePolicyOnPodConditionsPatternApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicyRule"): + return &applyconfigurationsbatchv1.PodFailurePolicyRuleApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("SuccessPolicy"): + return &applyconfigurationsbatchv1.SuccessPolicyApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("SuccessPolicyRule"): + return &applyconfigurationsbatchv1.SuccessPolicyRuleApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("UncountedTerminatedPods"): + return &applyconfigurationsbatchv1.UncountedTerminatedPodsApplyConfiguration{} + + // Group=batch, Version=v1beta1 + case batchv1beta1.SchemeGroupVersion.WithKind("CronJob"): + return &applyconfigurationsbatchv1beta1.CronJobApplyConfiguration{} + case batchv1beta1.SchemeGroupVersion.WithKind("CronJobSpec"): + return &applyconfigurationsbatchv1beta1.CronJobSpecApplyConfiguration{} + case batchv1beta1.SchemeGroupVersion.WithKind("CronJobStatus"): + return &applyconfigurationsbatchv1beta1.CronJobStatusApplyConfiguration{} + case batchv1beta1.SchemeGroupVersion.WithKind("JobTemplateSpec"): + return &applyconfigurationsbatchv1beta1.JobTemplateSpecApplyConfiguration{} + + // Group=certificates.k8s.io, Version=v1 + case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequest"): + return &applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration{} + case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequestCondition"): + return &applyconfigurationscertificatesv1.CertificateSigningRequestConditionApplyConfiguration{} + case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequestSpec"): + return &applyconfigurationscertificatesv1.CertificateSigningRequestSpecApplyConfiguration{} + case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequestStatus"): + return &applyconfigurationscertificatesv1.CertificateSigningRequestStatusApplyConfiguration{} + + // Group=certificates.k8s.io, Version=v1alpha1 + case certificatesv1alpha1.SchemeGroupVersion.WithKind("ClusterTrustBundle"): + return &applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration{} + case certificatesv1alpha1.SchemeGroupVersion.WithKind("ClusterTrustBundleSpec"): + return 
&applyconfigurationscertificatesv1alpha1.ClusterTrustBundleSpecApplyConfiguration{} + + // Group=certificates.k8s.io, Version=v1beta1 + case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequest"): + return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration{} + case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequestCondition"): + return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestConditionApplyConfiguration{} + case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequestSpec"): + return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestSpecApplyConfiguration{} + case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequestStatus"): + return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestStatusApplyConfiguration{} + + // Group=coordination.k8s.io, Version=v1 + case coordinationv1.SchemeGroupVersion.WithKind("Lease"): + return &applyconfigurationscoordinationv1.LeaseApplyConfiguration{} + case coordinationv1.SchemeGroupVersion.WithKind("LeaseSpec"): + return &applyconfigurationscoordinationv1.LeaseSpecApplyConfiguration{} + + // Group=coordination.k8s.io, Version=v1alpha1 + case coordinationv1alpha1.SchemeGroupVersion.WithKind("LeaseCandidate"): + return &applyconfigurationscoordinationv1alpha1.LeaseCandidateApplyConfiguration{} + case coordinationv1alpha1.SchemeGroupVersion.WithKind("LeaseCandidateSpec"): + return &applyconfigurationscoordinationv1alpha1.LeaseCandidateSpecApplyConfiguration{} + + // Group=coordination.k8s.io, Version=v1beta1 + case coordinationv1beta1.SchemeGroupVersion.WithKind("Lease"): + return &applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration{} + case coordinationv1beta1.SchemeGroupVersion.WithKind("LeaseSpec"): + return &applyconfigurationscoordinationv1beta1.LeaseSpecApplyConfiguration{} + + // Group=core, Version=v1 + case corev1.SchemeGroupVersion.WithKind("Affinity"): + return &applyconfigurationscorev1.AffinityApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AppArmorProfile"): + return &applyconfigurationscorev1.AppArmorProfileApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AttachedVolume"): + return &applyconfigurationscorev1.AttachedVolumeApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AWSElasticBlockStoreVolumeSource"): + return &applyconfigurationscorev1.AWSElasticBlockStoreVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AzureDiskVolumeSource"): + return &applyconfigurationscorev1.AzureDiskVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AzureFilePersistentVolumeSource"): + return &applyconfigurationscorev1.AzureFilePersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AzureFileVolumeSource"): + return &applyconfigurationscorev1.AzureFileVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Capabilities"): + return &applyconfigurationscorev1.CapabilitiesApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("CephFSPersistentVolumeSource"): + return &applyconfigurationscorev1.CephFSPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("CephFSVolumeSource"): + return &applyconfigurationscorev1.CephFSVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("CinderPersistentVolumeSource"): + return &applyconfigurationscorev1.CinderPersistentVolumeSourceApplyConfiguration{} + 
case corev1.SchemeGroupVersion.WithKind("CinderVolumeSource"): + return &applyconfigurationscorev1.CinderVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ClientIPConfig"): + return &applyconfigurationscorev1.ClientIPConfigApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ClusterTrustBundleProjection"): + return &applyconfigurationscorev1.ClusterTrustBundleProjectionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ComponentCondition"): + return &applyconfigurationscorev1.ComponentConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ComponentStatus"): + return &applyconfigurationscorev1.ComponentStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMap"): + return &applyconfigurationscorev1.ConfigMapApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMapEnvSource"): + return &applyconfigurationscorev1.ConfigMapEnvSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMapKeySelector"): + return &applyconfigurationscorev1.ConfigMapKeySelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMapNodeConfigSource"): + return &applyconfigurationscorev1.ConfigMapNodeConfigSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMapProjection"): + return &applyconfigurationscorev1.ConfigMapProjectionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMapVolumeSource"): + return &applyconfigurationscorev1.ConfigMapVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Container"): + return &applyconfigurationscorev1.ContainerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerImage"): + return &applyconfigurationscorev1.ContainerImageApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerPort"): + return &applyconfigurationscorev1.ContainerPortApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerResizePolicy"): + return &applyconfigurationscorev1.ContainerResizePolicyApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerState"): + return &applyconfigurationscorev1.ContainerStateApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerStateRunning"): + return &applyconfigurationscorev1.ContainerStateRunningApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerStateTerminated"): + return &applyconfigurationscorev1.ContainerStateTerminatedApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerStateWaiting"): + return &applyconfigurationscorev1.ContainerStateWaitingApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerStatus"): + return &applyconfigurationscorev1.ContainerStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerUser"): + return &applyconfigurationscorev1.ContainerUserApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("CSIPersistentVolumeSource"): + return &applyconfigurationscorev1.CSIPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("CSIVolumeSource"): + return &applyconfigurationscorev1.CSIVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("DaemonEndpoint"): + return &applyconfigurationscorev1.DaemonEndpointApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("DownwardAPIProjection"): + return &applyconfigurationscorev1.DownwardAPIProjectionApplyConfiguration{} + case 
corev1.SchemeGroupVersion.WithKind("DownwardAPIVolumeFile"): + return &applyconfigurationscorev1.DownwardAPIVolumeFileApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("DownwardAPIVolumeSource"): + return &applyconfigurationscorev1.DownwardAPIVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EmptyDirVolumeSource"): + return &applyconfigurationscorev1.EmptyDirVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EndpointAddress"): + return &applyconfigurationscorev1.EndpointAddressApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EndpointPort"): + return &applyconfigurationscorev1.EndpointPortApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Endpoints"): + return &applyconfigurationscorev1.EndpointsApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EndpointSubset"): + return &applyconfigurationscorev1.EndpointSubsetApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EnvFromSource"): + return &applyconfigurationscorev1.EnvFromSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EnvVar"): + return &applyconfigurationscorev1.EnvVarApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EnvVarSource"): + return &applyconfigurationscorev1.EnvVarSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EphemeralContainer"): + return &applyconfigurationscorev1.EphemeralContainerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EphemeralContainerCommon"): + return &applyconfigurationscorev1.EphemeralContainerCommonApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EphemeralVolumeSource"): + return &applyconfigurationscorev1.EphemeralVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Event"): + return &applyconfigurationscorev1.EventApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EventSeries"): + return &applyconfigurationscorev1.EventSeriesApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EventSource"): + return &applyconfigurationscorev1.EventSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ExecAction"): + return &applyconfigurationscorev1.ExecActionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("FCVolumeSource"): + return &applyconfigurationscorev1.FCVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("FlexPersistentVolumeSource"): + return &applyconfigurationscorev1.FlexPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("FlexVolumeSource"): + return &applyconfigurationscorev1.FlexVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("FlockerVolumeSource"): + return &applyconfigurationscorev1.FlockerVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("GCEPersistentDiskVolumeSource"): + return &applyconfigurationscorev1.GCEPersistentDiskVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("GitRepoVolumeSource"): + return &applyconfigurationscorev1.GitRepoVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("GlusterfsPersistentVolumeSource"): + return &applyconfigurationscorev1.GlusterfsPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("GlusterfsVolumeSource"): + return &applyconfigurationscorev1.GlusterfsVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("GRPCAction"): + return 
&applyconfigurationscorev1.GRPCActionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("HostAlias"): + return &applyconfigurationscorev1.HostAliasApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("HostIP"): + return &applyconfigurationscorev1.HostIPApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("HostPathVolumeSource"): + return &applyconfigurationscorev1.HostPathVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("HTTPGetAction"): + return &applyconfigurationscorev1.HTTPGetActionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("HTTPHeader"): + return &applyconfigurationscorev1.HTTPHeaderApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ImageVolumeSource"): + return &applyconfigurationscorev1.ImageVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ISCSIPersistentVolumeSource"): + return &applyconfigurationscorev1.ISCSIPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ISCSIVolumeSource"): + return &applyconfigurationscorev1.ISCSIVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("KeyToPath"): + return &applyconfigurationscorev1.KeyToPathApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Lifecycle"): + return &applyconfigurationscorev1.LifecycleApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LifecycleHandler"): + return &applyconfigurationscorev1.LifecycleHandlerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LimitRange"): + return &applyconfigurationscorev1.LimitRangeApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LimitRangeItem"): + return &applyconfigurationscorev1.LimitRangeItemApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LimitRangeSpec"): + return &applyconfigurationscorev1.LimitRangeSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LinuxContainerUser"): + return &applyconfigurationscorev1.LinuxContainerUserApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LoadBalancerIngress"): + return &applyconfigurationscorev1.LoadBalancerIngressApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LoadBalancerStatus"): + return &applyconfigurationscorev1.LoadBalancerStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LocalObjectReference"): + return &applyconfigurationscorev1.LocalObjectReferenceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LocalVolumeSource"): + return &applyconfigurationscorev1.LocalVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ModifyVolumeStatus"): + return &applyconfigurationscorev1.ModifyVolumeStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Namespace"): + return &applyconfigurationscorev1.NamespaceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NamespaceCondition"): + return &applyconfigurationscorev1.NamespaceConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NamespaceSpec"): + return &applyconfigurationscorev1.NamespaceSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NamespaceStatus"): + return &applyconfigurationscorev1.NamespaceStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NFSVolumeSource"): + return &applyconfigurationscorev1.NFSVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Node"): + return &applyconfigurationscorev1.NodeApplyConfiguration{} + case 
corev1.SchemeGroupVersion.WithKind("NodeAddress"): + return &applyconfigurationscorev1.NodeAddressApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeAffinity"): + return &applyconfigurationscorev1.NodeAffinityApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeCondition"): + return &applyconfigurationscorev1.NodeConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeConfigSource"): + return &applyconfigurationscorev1.NodeConfigSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeConfigStatus"): + return &applyconfigurationscorev1.NodeConfigStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeDaemonEndpoints"): + return &applyconfigurationscorev1.NodeDaemonEndpointsApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeFeatures"): + return &applyconfigurationscorev1.NodeFeaturesApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeRuntimeHandler"): + return &applyconfigurationscorev1.NodeRuntimeHandlerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeRuntimeHandlerFeatures"): + return &applyconfigurationscorev1.NodeRuntimeHandlerFeaturesApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeSelector"): + return &applyconfigurationscorev1.NodeSelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeSelectorRequirement"): + return &applyconfigurationscorev1.NodeSelectorRequirementApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeSelectorTerm"): + return &applyconfigurationscorev1.NodeSelectorTermApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeSpec"): + return &applyconfigurationscorev1.NodeSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeStatus"): + return &applyconfigurationscorev1.NodeStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeSystemInfo"): + return &applyconfigurationscorev1.NodeSystemInfoApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ObjectFieldSelector"): + return &applyconfigurationscorev1.ObjectFieldSelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ObjectReference"): + return &applyconfigurationscorev1.ObjectReferenceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolume"): + return &applyconfigurationscorev1.PersistentVolumeApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim"): + return &applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimCondition"): + return &applyconfigurationscorev1.PersistentVolumeClaimConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimSpec"): + return &applyconfigurationscorev1.PersistentVolumeClaimSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimStatus"): + return &applyconfigurationscorev1.PersistentVolumeClaimStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimTemplate"): + return &applyconfigurationscorev1.PersistentVolumeClaimTemplateApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimVolumeSource"): + return &applyconfigurationscorev1.PersistentVolumeClaimVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeSource"): + return &applyconfigurationscorev1.PersistentVolumeSourceApplyConfiguration{} + case 
corev1.SchemeGroupVersion.WithKind("PersistentVolumeSpec"): + return &applyconfigurationscorev1.PersistentVolumeSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeStatus"): + return &applyconfigurationscorev1.PersistentVolumeStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PhotonPersistentDiskVolumeSource"): + return &applyconfigurationscorev1.PhotonPersistentDiskVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Pod"): + return &applyconfigurationscorev1.PodApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodAffinity"): + return &applyconfigurationscorev1.PodAffinityApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodAffinityTerm"): + return &applyconfigurationscorev1.PodAffinityTermApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodAntiAffinity"): + return &applyconfigurationscorev1.PodAntiAffinityApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodCondition"): + return &applyconfigurationscorev1.PodConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodDNSConfig"): + return &applyconfigurationscorev1.PodDNSConfigApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodDNSConfigOption"): + return &applyconfigurationscorev1.PodDNSConfigOptionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodIP"): + return &applyconfigurationscorev1.PodIPApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodOS"): + return &applyconfigurationscorev1.PodOSApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodReadinessGate"): + return &applyconfigurationscorev1.PodReadinessGateApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodResourceClaim"): + return &applyconfigurationscorev1.PodResourceClaimApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodResourceClaimStatus"): + return &applyconfigurationscorev1.PodResourceClaimStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodSchedulingGate"): + return &applyconfigurationscorev1.PodSchedulingGateApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodSecurityContext"): + return &applyconfigurationscorev1.PodSecurityContextApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodSpec"): + return &applyconfigurationscorev1.PodSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodStatus"): + return &applyconfigurationscorev1.PodStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodTemplate"): + return &applyconfigurationscorev1.PodTemplateApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodTemplateSpec"): + return &applyconfigurationscorev1.PodTemplateSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PortStatus"): + return &applyconfigurationscorev1.PortStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PortworxVolumeSource"): + return &applyconfigurationscorev1.PortworxVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PreferredSchedulingTerm"): + return &applyconfigurationscorev1.PreferredSchedulingTermApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Probe"): + return &applyconfigurationscorev1.ProbeApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ProbeHandler"): + return &applyconfigurationscorev1.ProbeHandlerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ProjectedVolumeSource"): + return 
&applyconfigurationscorev1.ProjectedVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("QuobyteVolumeSource"): + return &applyconfigurationscorev1.QuobyteVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("RBDPersistentVolumeSource"): + return &applyconfigurationscorev1.RBDPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("RBDVolumeSource"): + return &applyconfigurationscorev1.RBDVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ReplicationController"): + return &applyconfigurationscorev1.ReplicationControllerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ReplicationControllerCondition"): + return &applyconfigurationscorev1.ReplicationControllerConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ReplicationControllerSpec"): + return &applyconfigurationscorev1.ReplicationControllerSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ReplicationControllerStatus"): + return &applyconfigurationscorev1.ReplicationControllerStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceClaim"): + return &applyconfigurationscorev1.ResourceClaimApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceFieldSelector"): + return &applyconfigurationscorev1.ResourceFieldSelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceHealth"): + return &applyconfigurationscorev1.ResourceHealthApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceQuota"): + return &applyconfigurationscorev1.ResourceQuotaApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceQuotaSpec"): + return &applyconfigurationscorev1.ResourceQuotaSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceQuotaStatus"): + return &applyconfigurationscorev1.ResourceQuotaStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceRequirements"): + return &applyconfigurationscorev1.ResourceRequirementsApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceStatus"): + return &applyconfigurationscorev1.ResourceStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ScaleIOPersistentVolumeSource"): + return &applyconfigurationscorev1.ScaleIOPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ScaleIOVolumeSource"): + return &applyconfigurationscorev1.ScaleIOVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ScopedResourceSelectorRequirement"): + return &applyconfigurationscorev1.ScopedResourceSelectorRequirementApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ScopeSelector"): + return &applyconfigurationscorev1.ScopeSelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SeccompProfile"): + return &applyconfigurationscorev1.SeccompProfileApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Secret"): + return &applyconfigurationscorev1.SecretApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecretEnvSource"): + return &applyconfigurationscorev1.SecretEnvSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecretKeySelector"): + return &applyconfigurationscorev1.SecretKeySelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecretProjection"): + return &applyconfigurationscorev1.SecretProjectionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecretReference"): + return 
&applyconfigurationscorev1.SecretReferenceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecretVolumeSource"): + return &applyconfigurationscorev1.SecretVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecurityContext"): + return &applyconfigurationscorev1.SecurityContextApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SELinuxOptions"): + return &applyconfigurationscorev1.SELinuxOptionsApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Service"): + return &applyconfigurationscorev1.ServiceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ServiceAccount"): + return &applyconfigurationscorev1.ServiceAccountApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ServiceAccountTokenProjection"): + return &applyconfigurationscorev1.ServiceAccountTokenProjectionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ServicePort"): + return &applyconfigurationscorev1.ServicePortApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ServiceSpec"): + return &applyconfigurationscorev1.ServiceSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ServiceStatus"): + return &applyconfigurationscorev1.ServiceStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SessionAffinityConfig"): + return &applyconfigurationscorev1.SessionAffinityConfigApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SleepAction"): + return &applyconfigurationscorev1.SleepActionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("StorageOSPersistentVolumeSource"): + return &applyconfigurationscorev1.StorageOSPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("StorageOSVolumeSource"): + return &applyconfigurationscorev1.StorageOSVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Sysctl"): + return &applyconfigurationscorev1.SysctlApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Taint"): + return &applyconfigurationscorev1.TaintApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TCPSocketAction"): + return &applyconfigurationscorev1.TCPSocketActionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Toleration"): + return &applyconfigurationscorev1.TolerationApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TopologySelectorLabelRequirement"): + return &applyconfigurationscorev1.TopologySelectorLabelRequirementApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TopologySelectorTerm"): + return &applyconfigurationscorev1.TopologySelectorTermApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TopologySpreadConstraint"): + return &applyconfigurationscorev1.TopologySpreadConstraintApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TypedLocalObjectReference"): + return &applyconfigurationscorev1.TypedLocalObjectReferenceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TypedObjectReference"): + return &applyconfigurationscorev1.TypedObjectReferenceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Volume"): + return &applyconfigurationscorev1.VolumeApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeDevice"): + return &applyconfigurationscorev1.VolumeDeviceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeMount"): + return &applyconfigurationscorev1.VolumeMountApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeMountStatus"): + return 
&applyconfigurationscorev1.VolumeMountStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeNodeAffinity"): + return &applyconfigurationscorev1.VolumeNodeAffinityApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeProjection"): + return &applyconfigurationscorev1.VolumeProjectionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeResourceRequirements"): + return &applyconfigurationscorev1.VolumeResourceRequirementsApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeSource"): + return &applyconfigurationscorev1.VolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VsphereVirtualDiskVolumeSource"): + return &applyconfigurationscorev1.VsphereVirtualDiskVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("WeightedPodAffinityTerm"): + return &applyconfigurationscorev1.WeightedPodAffinityTermApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("WindowsSecurityContextOptions"): + return &applyconfigurationscorev1.WindowsSecurityContextOptionsApplyConfiguration{} + + // Group=discovery.k8s.io, Version=v1 + case discoveryv1.SchemeGroupVersion.WithKind("Endpoint"): + return &applyconfigurationsdiscoveryv1.EndpointApplyConfiguration{} + case discoveryv1.SchemeGroupVersion.WithKind("EndpointConditions"): + return &applyconfigurationsdiscoveryv1.EndpointConditionsApplyConfiguration{} + case discoveryv1.SchemeGroupVersion.WithKind("EndpointHints"): + return &applyconfigurationsdiscoveryv1.EndpointHintsApplyConfiguration{} + case discoveryv1.SchemeGroupVersion.WithKind("EndpointPort"): + return &applyconfigurationsdiscoveryv1.EndpointPortApplyConfiguration{} + case discoveryv1.SchemeGroupVersion.WithKind("EndpointSlice"): + return &applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration{} + case discoveryv1.SchemeGroupVersion.WithKind("ForZone"): + return &applyconfigurationsdiscoveryv1.ForZoneApplyConfiguration{} + + // Group=discovery.k8s.io, Version=v1beta1 + case discoveryv1beta1.SchemeGroupVersion.WithKind("Endpoint"): + return &applyconfigurationsdiscoveryv1beta1.EndpointApplyConfiguration{} + case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointConditions"): + return &applyconfigurationsdiscoveryv1beta1.EndpointConditionsApplyConfiguration{} + case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointHints"): + return &applyconfigurationsdiscoveryv1beta1.EndpointHintsApplyConfiguration{} + case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointPort"): + return &applyconfigurationsdiscoveryv1beta1.EndpointPortApplyConfiguration{} + case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointSlice"): + return &applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration{} + case discoveryv1beta1.SchemeGroupVersion.WithKind("ForZone"): + return &applyconfigurationsdiscoveryv1beta1.ForZoneApplyConfiguration{} + + // Group=events.k8s.io, Version=v1 + case eventsv1.SchemeGroupVersion.WithKind("Event"): + return &applyconfigurationseventsv1.EventApplyConfiguration{} + case eventsv1.SchemeGroupVersion.WithKind("EventSeries"): + return &applyconfigurationseventsv1.EventSeriesApplyConfiguration{} + + // Group=events.k8s.io, Version=v1beta1 + case eventsv1beta1.SchemeGroupVersion.WithKind("Event"): + return &applyconfigurationseventsv1beta1.EventApplyConfiguration{} + case eventsv1beta1.SchemeGroupVersion.WithKind("EventSeries"): + return &applyconfigurationseventsv1beta1.EventSeriesApplyConfiguration{} + + // Group=extensions, Version=v1beta1 + case 
extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet"): + return &applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetCondition"): + return &applyconfigurationsextensionsv1beta1.DaemonSetConditionApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetSpec"): + return &applyconfigurationsextensionsv1beta1.DaemonSetSpecApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetStatus"): + return &applyconfigurationsextensionsv1beta1.DaemonSetStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetUpdateStrategy"): + return &applyconfigurationsextensionsv1beta1.DaemonSetUpdateStrategyApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment"): + return &applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentCondition"): + return &applyconfigurationsextensionsv1beta1.DeploymentConditionApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentSpec"): + return &applyconfigurationsextensionsv1beta1.DeploymentSpecApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentStatus"): + return &applyconfigurationsextensionsv1beta1.DeploymentStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentStrategy"): + return &applyconfigurationsextensionsv1beta1.DeploymentStrategyApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("HTTPIngressPath"): + return &applyconfigurationsextensionsv1beta1.HTTPIngressPathApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("HTTPIngressRuleValue"): + return &applyconfigurationsextensionsv1beta1.HTTPIngressRuleValueApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("Ingress"): + return &applyconfigurationsextensionsv1beta1.IngressApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressBackend"): + return &applyconfigurationsextensionsv1beta1.IngressBackendApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerIngress"): + return &applyconfigurationsextensionsv1beta1.IngressLoadBalancerIngressApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerStatus"): + return &applyconfigurationsextensionsv1beta1.IngressLoadBalancerStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressPortStatus"): + return &applyconfigurationsextensionsv1beta1.IngressPortStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressRule"): + return &applyconfigurationsextensionsv1beta1.IngressRuleApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressRuleValue"): + return &applyconfigurationsextensionsv1beta1.IngressRuleValueApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressSpec"): + return &applyconfigurationsextensionsv1beta1.IngressSpecApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressStatus"): + return &applyconfigurationsextensionsv1beta1.IngressStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressTLS"): + return &applyconfigurationsextensionsv1beta1.IngressTLSApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IPBlock"): + return 
&applyconfigurationsextensionsv1beta1.IPBlockApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicy"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyEgressRule"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicyEgressRuleApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyIngressRule"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicyIngressRuleApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyPeer"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicyPeerApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyPort"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicyPortApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicySpec"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicySpecApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet"): + return &applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSetCondition"): + return &applyconfigurationsextensionsv1beta1.ReplicaSetConditionApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSetSpec"): + return &applyconfigurationsextensionsv1beta1.ReplicaSetSpecApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSetStatus"): + return &applyconfigurationsextensionsv1beta1.ReplicaSetStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("RollbackConfig"): + return &applyconfigurationsextensionsv1beta1.RollbackConfigApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateDaemonSet"): + return &applyconfigurationsextensionsv1beta1.RollingUpdateDaemonSetApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateDeployment"): + return &applyconfigurationsextensionsv1beta1.RollingUpdateDeploymentApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("Scale"): + return &applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration{} + + // Group=flowcontrol.apiserver.k8s.io, Version=v1 + case flowcontrolv1.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1.ExemptPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"): + return &applyconfigurationsflowcontrolv1.FlowDistinguisherMethodApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchema"): + return &applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchemaCondition"): + return &applyconfigurationsflowcontrolv1.FlowSchemaConditionApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchemaSpec"): + return &applyconfigurationsflowcontrolv1.FlowSchemaSpecApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchemaStatus"): + return &applyconfigurationsflowcontrolv1.FlowSchemaStatusApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("GroupSubject"): + return &applyconfigurationsflowcontrolv1.GroupSubjectApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"): + return 
&applyconfigurationsflowcontrolv1.LimitedPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("LimitResponse"): + return &applyconfigurationsflowcontrolv1.LimitResponseApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("NonResourcePolicyRule"): + return &applyconfigurationsflowcontrolv1.NonResourcePolicyRuleApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"): + return &applyconfigurationsflowcontrolv1.PolicyRulesWithSubjectsApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"): + return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationConditionApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"): + return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationReferenceApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"): + return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationSpecApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"): + return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationStatusApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("QueuingConfiguration"): + return &applyconfigurationsflowcontrolv1.QueuingConfigurationApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("ResourcePolicyRule"): + return &applyconfigurationsflowcontrolv1.ResourcePolicyRuleApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("ServiceAccountSubject"): + return &applyconfigurationsflowcontrolv1.ServiceAccountSubjectApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsflowcontrolv1.SubjectApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("UserSubject"): + return &applyconfigurationsflowcontrolv1.UserSubjectApplyConfiguration{} + + // Group=flowcontrol.apiserver.k8s.io, Version=v1beta1 + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1beta1.ExemptPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"): + return &applyconfigurationsflowcontrolv1beta1.FlowDistinguisherMethodApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchema"): + return &applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchemaCondition"): + return &applyconfigurationsflowcontrolv1beta1.FlowSchemaConditionApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchemaSpec"): + return &applyconfigurationsflowcontrolv1beta1.FlowSchemaSpecApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchemaStatus"): + return &applyconfigurationsflowcontrolv1beta1.FlowSchemaStatusApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("GroupSubject"): + return &applyconfigurationsflowcontrolv1beta1.GroupSubjectApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"): + return 
&applyconfigurationsflowcontrolv1beta1.LimitedPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("LimitResponse"): + return &applyconfigurationsflowcontrolv1beta1.LimitResponseApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("NonResourcePolicyRule"): + return &applyconfigurationsflowcontrolv1beta1.NonResourcePolicyRuleApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"): + return &applyconfigurationsflowcontrolv1beta1.PolicyRulesWithSubjectsApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"): + return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationConditionApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"): + return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationReferenceApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"): + return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationSpecApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"): + return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationStatusApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("QueuingConfiguration"): + return &applyconfigurationsflowcontrolv1beta1.QueuingConfigurationApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("ResourcePolicyRule"): + return &applyconfigurationsflowcontrolv1beta1.ResourcePolicyRuleApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("ServiceAccountSubject"): + return &applyconfigurationsflowcontrolv1beta1.ServiceAccountSubjectApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsflowcontrolv1beta1.SubjectApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("UserSubject"): + return &applyconfigurationsflowcontrolv1beta1.UserSubjectApplyConfiguration{} + + // Group=flowcontrol.apiserver.k8s.io, Version=v1beta2 + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1beta2.ExemptPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"): + return &applyconfigurationsflowcontrolv1beta2.FlowDistinguisherMethodApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchema"): + return &applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchemaCondition"): + return &applyconfigurationsflowcontrolv1beta2.FlowSchemaConditionApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchemaSpec"): + return &applyconfigurationsflowcontrolv1beta2.FlowSchemaSpecApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchemaStatus"): + return &applyconfigurationsflowcontrolv1beta2.FlowSchemaStatusApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("GroupSubject"): + return &applyconfigurationsflowcontrolv1beta2.GroupSubjectApplyConfiguration{} + case 
flowcontrolv1beta2.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1beta2.LimitedPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("LimitResponse"): + return &applyconfigurationsflowcontrolv1beta2.LimitResponseApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("NonResourcePolicyRule"): + return &applyconfigurationsflowcontrolv1beta2.NonResourcePolicyRuleApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"): + return &applyconfigurationsflowcontrolv1beta2.PolicyRulesWithSubjectsApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"): + return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationConditionApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"): + return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationReferenceApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"): + return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationSpecApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"): + return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationStatusApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("QueuingConfiguration"): + return &applyconfigurationsflowcontrolv1beta2.QueuingConfigurationApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("ResourcePolicyRule"): + return &applyconfigurationsflowcontrolv1beta2.ResourcePolicyRuleApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("ServiceAccountSubject"): + return &applyconfigurationsflowcontrolv1beta2.ServiceAccountSubjectApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsflowcontrolv1beta2.SubjectApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("UserSubject"): + return &applyconfigurationsflowcontrolv1beta2.UserSubjectApplyConfiguration{} + + // Group=flowcontrol.apiserver.k8s.io, Version=v1beta3 + case v1beta3.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"): + return &flowcontrolv1beta3.ExemptPriorityLevelConfigurationApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"): + return &flowcontrolv1beta3.FlowDistinguisherMethodApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("FlowSchema"): + return &flowcontrolv1beta3.FlowSchemaApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("FlowSchemaCondition"): + return &flowcontrolv1beta3.FlowSchemaConditionApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("FlowSchemaSpec"): + return &flowcontrolv1beta3.FlowSchemaSpecApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("FlowSchemaStatus"): + return &flowcontrolv1beta3.FlowSchemaStatusApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("GroupSubject"): + return &flowcontrolv1beta3.GroupSubjectApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"): + return 
&flowcontrolv1beta3.LimitedPriorityLevelConfigurationApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("LimitResponse"): + return &flowcontrolv1beta3.LimitResponseApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("NonResourcePolicyRule"): + return &flowcontrolv1beta3.NonResourcePolicyRuleApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"): + return &flowcontrolv1beta3.PolicyRulesWithSubjectsApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"): + return &flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"): + return &flowcontrolv1beta3.PriorityLevelConfigurationConditionApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"): + return &flowcontrolv1beta3.PriorityLevelConfigurationReferenceApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"): + return &flowcontrolv1beta3.PriorityLevelConfigurationSpecApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"): + return &flowcontrolv1beta3.PriorityLevelConfigurationStatusApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("QueuingConfiguration"): + return &flowcontrolv1beta3.QueuingConfigurationApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("ResourcePolicyRule"): + return &flowcontrolv1beta3.ResourcePolicyRuleApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("ServiceAccountSubject"): + return &flowcontrolv1beta3.ServiceAccountSubjectApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("Subject"): + return &flowcontrolv1beta3.SubjectApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("UserSubject"): + return &flowcontrolv1beta3.UserSubjectApplyConfiguration{} + + // Group=imagepolicy.k8s.io, Version=v1alpha1 + case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReview"): + return &applyconfigurationsimagepolicyv1alpha1.ImageReviewApplyConfiguration{} + case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReviewContainerSpec"): + return &applyconfigurationsimagepolicyv1alpha1.ImageReviewContainerSpecApplyConfiguration{} + case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReviewSpec"): + return &applyconfigurationsimagepolicyv1alpha1.ImageReviewSpecApplyConfiguration{} + case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReviewStatus"): + return &applyconfigurationsimagepolicyv1alpha1.ImageReviewStatusApplyConfiguration{} + + // Group=internal.apiserver.k8s.io, Version=v1alpha1 + case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("ServerStorageVersion"): + return &applyconfigurationsapiserverinternalv1alpha1.ServerStorageVersionApplyConfiguration{} + case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("StorageVersion"): + return &applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration{} + case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("StorageVersionCondition"): + return &applyconfigurationsapiserverinternalv1alpha1.StorageVersionConditionApplyConfiguration{} + case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("StorageVersionStatus"): + return &applyconfigurationsapiserverinternalv1alpha1.StorageVersionStatusApplyConfiguration{} + + // Group=meta.k8s.io, Version=v1 + case metav1.SchemeGroupVersion.WithKind("Condition"): + return 
&applyconfigurationsmetav1.ConditionApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("DeleteOptions"): + return &applyconfigurationsmetav1.DeleteOptionsApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("LabelSelector"): + return &applyconfigurationsmetav1.LabelSelectorApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("LabelSelectorRequirement"): + return &applyconfigurationsmetav1.LabelSelectorRequirementApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("ManagedFieldsEntry"): + return &applyconfigurationsmetav1.ManagedFieldsEntryApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("ObjectMeta"): + return &applyconfigurationsmetav1.ObjectMetaApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("OwnerReference"): + return &applyconfigurationsmetav1.OwnerReferenceApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("Preconditions"): + return &applyconfigurationsmetav1.PreconditionsApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("TypeMeta"): + return &applyconfigurationsmetav1.TypeMetaApplyConfiguration{} + + // Group=networking.k8s.io, Version=v1 + case networkingv1.SchemeGroupVersion.WithKind("HTTPIngressPath"): + return &applyconfigurationsnetworkingv1.HTTPIngressPathApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("HTTPIngressRuleValue"): + return &applyconfigurationsnetworkingv1.HTTPIngressRuleValueApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("Ingress"): + return &applyconfigurationsnetworkingv1.IngressApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressBackend"): + return &applyconfigurationsnetworkingv1.IngressBackendApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressClass"): + return &applyconfigurationsnetworkingv1.IngressClassApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressClassParametersReference"): + return &applyconfigurationsnetworkingv1.IngressClassParametersReferenceApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressClassSpec"): + return &applyconfigurationsnetworkingv1.IngressClassSpecApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressLoadBalancerIngress"): + return &applyconfigurationsnetworkingv1.IngressLoadBalancerIngressApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressLoadBalancerStatus"): + return &applyconfigurationsnetworkingv1.IngressLoadBalancerStatusApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressPortStatus"): + return &applyconfigurationsnetworkingv1.IngressPortStatusApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressRule"): + return &applyconfigurationsnetworkingv1.IngressRuleApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressRuleValue"): + return &applyconfigurationsnetworkingv1.IngressRuleValueApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressServiceBackend"): + return &applyconfigurationsnetworkingv1.IngressServiceBackendApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressSpec"): + return &applyconfigurationsnetworkingv1.IngressSpecApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressStatus"): + return &applyconfigurationsnetworkingv1.IngressStatusApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressTLS"): + return &applyconfigurationsnetworkingv1.IngressTLSApplyConfiguration{} + case 
networkingv1.SchemeGroupVersion.WithKind("IPBlock"): + return &applyconfigurationsnetworkingv1.IPBlockApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicy"): + return &applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyEgressRule"): + return &applyconfigurationsnetworkingv1.NetworkPolicyEgressRuleApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyIngressRule"): + return &applyconfigurationsnetworkingv1.NetworkPolicyIngressRuleApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyPeer"): + return &applyconfigurationsnetworkingv1.NetworkPolicyPeerApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyPort"): + return &applyconfigurationsnetworkingv1.NetworkPolicyPortApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicySpec"): + return &applyconfigurationsnetworkingv1.NetworkPolicySpecApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("ServiceBackendPort"): + return &applyconfigurationsnetworkingv1.ServiceBackendPortApplyConfiguration{} + + // Group=networking.k8s.io, Version=v1alpha1 + case networkingv1alpha1.SchemeGroupVersion.WithKind("IPAddress"): + return &applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration{} + case networkingv1alpha1.SchemeGroupVersion.WithKind("IPAddressSpec"): + return &applyconfigurationsnetworkingv1alpha1.IPAddressSpecApplyConfiguration{} + case networkingv1alpha1.SchemeGroupVersion.WithKind("ParentReference"): + return &applyconfigurationsnetworkingv1alpha1.ParentReferenceApplyConfiguration{} + case networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR"): + return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration{} + case networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDRSpec"): + return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRSpecApplyConfiguration{} + case networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDRStatus"): + return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRStatusApplyConfiguration{} + + // Group=networking.k8s.io, Version=v1beta1 + case networkingv1beta1.SchemeGroupVersion.WithKind("HTTPIngressPath"): + return &applyconfigurationsnetworkingv1beta1.HTTPIngressPathApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("HTTPIngressRuleValue"): + return &applyconfigurationsnetworkingv1beta1.HTTPIngressRuleValueApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("Ingress"): + return &applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressBackend"): + return &applyconfigurationsnetworkingv1beta1.IngressBackendApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressClass"): + return &applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressClassParametersReference"): + return &applyconfigurationsnetworkingv1beta1.IngressClassParametersReferenceApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressClassSpec"): + return &applyconfigurationsnetworkingv1beta1.IngressClassSpecApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerIngress"): + return &applyconfigurationsnetworkingv1beta1.IngressLoadBalancerIngressApplyConfiguration{} + case 
networkingv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerStatus"): + return &applyconfigurationsnetworkingv1beta1.IngressLoadBalancerStatusApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressPortStatus"): + return &applyconfigurationsnetworkingv1beta1.IngressPortStatusApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressRule"): + return &applyconfigurationsnetworkingv1beta1.IngressRuleApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressRuleValue"): + return &applyconfigurationsnetworkingv1beta1.IngressRuleValueApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressSpec"): + return &applyconfigurationsnetworkingv1beta1.IngressSpecApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressStatus"): + return &applyconfigurationsnetworkingv1beta1.IngressStatusApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressTLS"): + return &applyconfigurationsnetworkingv1beta1.IngressTLSApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IPAddress"): + return &applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IPAddressSpec"): + return &applyconfigurationsnetworkingv1beta1.IPAddressSpecApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("ParentReference"): + return &applyconfigurationsnetworkingv1beta1.ParentReferenceApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("ServiceCIDR"): + return &applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("ServiceCIDRSpec"): + return &applyconfigurationsnetworkingv1beta1.ServiceCIDRSpecApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("ServiceCIDRStatus"): + return &applyconfigurationsnetworkingv1beta1.ServiceCIDRStatusApplyConfiguration{} + + // Group=node.k8s.io, Version=v1 + case nodev1.SchemeGroupVersion.WithKind("Overhead"): + return &applyconfigurationsnodev1.OverheadApplyConfiguration{} + case nodev1.SchemeGroupVersion.WithKind("RuntimeClass"): + return &applyconfigurationsnodev1.RuntimeClassApplyConfiguration{} + case nodev1.SchemeGroupVersion.WithKind("Scheduling"): + return &applyconfigurationsnodev1.SchedulingApplyConfiguration{} + + // Group=node.k8s.io, Version=v1alpha1 + case nodev1alpha1.SchemeGroupVersion.WithKind("Overhead"): + return &applyconfigurationsnodev1alpha1.OverheadApplyConfiguration{} + case nodev1alpha1.SchemeGroupVersion.WithKind("RuntimeClass"): + return &applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration{} + case nodev1alpha1.SchemeGroupVersion.WithKind("RuntimeClassSpec"): + return &applyconfigurationsnodev1alpha1.RuntimeClassSpecApplyConfiguration{} + case nodev1alpha1.SchemeGroupVersion.WithKind("Scheduling"): + return &applyconfigurationsnodev1alpha1.SchedulingApplyConfiguration{} + + // Group=node.k8s.io, Version=v1beta1 + case nodev1beta1.SchemeGroupVersion.WithKind("Overhead"): + return &applyconfigurationsnodev1beta1.OverheadApplyConfiguration{} + case nodev1beta1.SchemeGroupVersion.WithKind("RuntimeClass"): + return &applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration{} + case nodev1beta1.SchemeGroupVersion.WithKind("Scheduling"): + return &applyconfigurationsnodev1beta1.SchedulingApplyConfiguration{} + + // Group=policy, Version=v1 + case policyv1.SchemeGroupVersion.WithKind("Eviction"): + return 
&applyconfigurationspolicyv1.EvictionApplyConfiguration{} + case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): + return &applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration{} + case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudgetSpec"): + return &applyconfigurationspolicyv1.PodDisruptionBudgetSpecApplyConfiguration{} + case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudgetStatus"): + return &applyconfigurationspolicyv1.PodDisruptionBudgetStatusApplyConfiguration{} + + // Group=policy, Version=v1beta1 + case policyv1beta1.SchemeGroupVersion.WithKind("Eviction"): + return &applyconfigurationspolicyv1beta1.EvictionApplyConfiguration{} + case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): + return &applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration{} + case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudgetSpec"): + return &applyconfigurationspolicyv1beta1.PodDisruptionBudgetSpecApplyConfiguration{} + case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudgetStatus"): + return &applyconfigurationspolicyv1beta1.PodDisruptionBudgetStatusApplyConfiguration{} + + // Group=rbac.authorization.k8s.io, Version=v1 + case rbacv1.SchemeGroupVersion.WithKind("AggregationRule"): + return &applyconfigurationsrbacv1.AggregationRuleApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("ClusterRole"): + return &applyconfigurationsrbacv1.ClusterRoleApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("ClusterRoleBinding"): + return &applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("PolicyRule"): + return &applyconfigurationsrbacv1.PolicyRuleApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("Role"): + return &applyconfigurationsrbacv1.RoleApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("RoleBinding"): + return &applyconfigurationsrbacv1.RoleBindingApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("RoleRef"): + return &applyconfigurationsrbacv1.RoleRefApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsrbacv1.SubjectApplyConfiguration{} + + // Group=rbac.authorization.k8s.io, Version=v1alpha1 + case rbacv1alpha1.SchemeGroupVersion.WithKind("AggregationRule"): + return &applyconfigurationsrbacv1alpha1.AggregationRuleApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("ClusterRole"): + return &applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("ClusterRoleBinding"): + return &applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("PolicyRule"): + return &applyconfigurationsrbacv1alpha1.PolicyRuleApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("Role"): + return &applyconfigurationsrbacv1alpha1.RoleApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("RoleBinding"): + return &applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("RoleRef"): + return &applyconfigurationsrbacv1alpha1.RoleRefApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsrbacv1alpha1.SubjectApplyConfiguration{} + + // Group=rbac.authorization.k8s.io, Version=v1beta1 + case rbacv1beta1.SchemeGroupVersion.WithKind("AggregationRule"): + return 
&applyconfigurationsrbacv1beta1.AggregationRuleApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("ClusterRole"): + return &applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("ClusterRoleBinding"): + return &applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("PolicyRule"): + return &applyconfigurationsrbacv1beta1.PolicyRuleApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("Role"): + return &applyconfigurationsrbacv1beta1.RoleApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("RoleBinding"): + return &applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("RoleRef"): + return &applyconfigurationsrbacv1beta1.RoleRefApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsrbacv1beta1.SubjectApplyConfiguration{} + + // Group=resource.k8s.io, Version=v1alpha3 + case v1alpha3.SchemeGroupVersion.WithKind("AllocationResult"): + return &resourcev1alpha3.AllocationResultApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("BasicDevice"): + return &resourcev1alpha3.BasicDeviceApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("CELDeviceSelector"): + return &resourcev1alpha3.CELDeviceSelectorApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("Device"): + return &resourcev1alpha3.DeviceApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceAllocationConfiguration"): + return &resourcev1alpha3.DeviceAllocationConfigurationApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceAllocationResult"): + return &resourcev1alpha3.DeviceAllocationResultApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceAttribute"): + return &resourcev1alpha3.DeviceAttributeApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceClaim"): + return &resourcev1alpha3.DeviceClaimApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceClaimConfiguration"): + return &resourcev1alpha3.DeviceClaimConfigurationApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceClass"): + return &resourcev1alpha3.DeviceClassApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceClassConfiguration"): + return &resourcev1alpha3.DeviceClassConfigurationApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceClassSpec"): + return &resourcev1alpha3.DeviceClassSpecApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceConfiguration"): + return &resourcev1alpha3.DeviceConfigurationApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceConstraint"): + return &resourcev1alpha3.DeviceConstraintApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceRequest"): + return &resourcev1alpha3.DeviceRequestApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceRequestAllocationResult"): + return &resourcev1alpha3.DeviceRequestAllocationResultApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceSelector"): + return &resourcev1alpha3.DeviceSelectorApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("OpaqueDeviceConfiguration"): + return &resourcev1alpha3.OpaqueDeviceConfigurationApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContext"): + return 
&resourcev1alpha3.PodSchedulingContextApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContextSpec"): + return &resourcev1alpha3.PodSchedulingContextSpecApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContextStatus"): + return &resourcev1alpha3.PodSchedulingContextStatusApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaim"): + return &resourcev1alpha3.ResourceClaimApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimConsumerReference"): + return &resourcev1alpha3.ResourceClaimConsumerReferenceApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimSchedulingStatus"): + return &resourcev1alpha3.ResourceClaimSchedulingStatusApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimSpec"): + return &resourcev1alpha3.ResourceClaimSpecApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimStatus"): + return &resourcev1alpha3.ResourceClaimStatusApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimTemplate"): + return &resourcev1alpha3.ResourceClaimTemplateApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimTemplateSpec"): + return &resourcev1alpha3.ResourceClaimTemplateSpecApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourcePool"): + return &resourcev1alpha3.ResourcePoolApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceSlice"): + return &resourcev1alpha3.ResourceSliceApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceSliceSpec"): + return &resourcev1alpha3.ResourceSliceSpecApplyConfiguration{} + + // Group=scheduling.k8s.io, Version=v1 + case schedulingv1.SchemeGroupVersion.WithKind("PriorityClass"): + return &applyconfigurationsschedulingv1.PriorityClassApplyConfiguration{} + + // Group=scheduling.k8s.io, Version=v1alpha1 + case schedulingv1alpha1.SchemeGroupVersion.WithKind("PriorityClass"): + return &applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration{} + + // Group=scheduling.k8s.io, Version=v1beta1 + case schedulingv1beta1.SchemeGroupVersion.WithKind("PriorityClass"): + return &applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration{} + + // Group=storage.k8s.io, Version=v1 + case storagev1.SchemeGroupVersion.WithKind("CSIDriver"): + return &applyconfigurationsstoragev1.CSIDriverApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("CSIDriverSpec"): + return &applyconfigurationsstoragev1.CSIDriverSpecApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("CSINode"): + return &applyconfigurationsstoragev1.CSINodeApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("CSINodeDriver"): + return &applyconfigurationsstoragev1.CSINodeDriverApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("CSINodeSpec"): + return &applyconfigurationsstoragev1.CSINodeSpecApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("CSIStorageCapacity"): + return &applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("StorageClass"): + return &applyconfigurationsstoragev1.StorageClassApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("TokenRequest"): + return &applyconfigurationsstoragev1.TokenRequestApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("VolumeAttachment"): + return &applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration{} 
+ case storagev1.SchemeGroupVersion.WithKind("VolumeAttachmentSource"): + return &applyconfigurationsstoragev1.VolumeAttachmentSourceApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("VolumeAttachmentSpec"): + return &applyconfigurationsstoragev1.VolumeAttachmentSpecApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("VolumeAttachmentStatus"): + return &applyconfigurationsstoragev1.VolumeAttachmentStatusApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("VolumeError"): + return &applyconfigurationsstoragev1.VolumeErrorApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("VolumeNodeResources"): + return &applyconfigurationsstoragev1.VolumeNodeResourcesApplyConfiguration{} + + // Group=storage.k8s.io, Version=v1alpha1 + case storagev1alpha1.SchemeGroupVersion.WithKind("CSIStorageCapacity"): + return &applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachment"): + return &applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachmentSource"): + return &applyconfigurationsstoragev1alpha1.VolumeAttachmentSourceApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachmentSpec"): + return &applyconfigurationsstoragev1alpha1.VolumeAttachmentSpecApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachmentStatus"): + return &applyconfigurationsstoragev1alpha1.VolumeAttachmentStatusApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttributesClass"): + return &applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeError"): + return &applyconfigurationsstoragev1alpha1.VolumeErrorApplyConfiguration{} + + // Group=storage.k8s.io, Version=v1beta1 + case storagev1beta1.SchemeGroupVersion.WithKind("CSIDriver"): + return &applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("CSIDriverSpec"): + return &applyconfigurationsstoragev1beta1.CSIDriverSpecApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("CSINode"): + return &applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("CSINodeDriver"): + return &applyconfigurationsstoragev1beta1.CSINodeDriverApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("CSINodeSpec"): + return &applyconfigurationsstoragev1beta1.CSINodeSpecApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("CSIStorageCapacity"): + return &applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("StorageClass"): + return &applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("TokenRequest"): + return &applyconfigurationsstoragev1beta1.TokenRequestApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachment"): + return &applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachmentSource"): + return &applyconfigurationsstoragev1beta1.VolumeAttachmentSourceApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachmentSpec"): + return 
&applyconfigurationsstoragev1beta1.VolumeAttachmentSpecApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachmentStatus"): + return &applyconfigurationsstoragev1beta1.VolumeAttachmentStatusApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttributesClass"): + return &applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeError"): + return &applyconfigurationsstoragev1beta1.VolumeErrorApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeNodeResources"): + return &applyconfigurationsstoragev1beta1.VolumeNodeResourcesApplyConfiguration{} + + // Group=storagemigration.k8s.io, Version=v1alpha1 + case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("GroupVersionResource"): + return &applyconfigurationsstoragemigrationv1alpha1.GroupVersionResourceApplyConfiguration{} + case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("MigrationCondition"): + return &applyconfigurationsstoragemigrationv1alpha1.MigrationConditionApplyConfiguration{} + case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigration"): + return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration{} + case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigrationSpec"): + return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationSpecApplyConfiguration{} + case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigrationStatus"): + return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationStatusApplyConfiguration{} + + } + return nil +} + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go index f8a78e1ef4..e5d9e7f800 100644 --- a/vendor/k8s.io/client-go/discovery/fake/discovery.go +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -47,7 +47,9 @@ func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*me Verb: "get", Resource: schema.GroupVersionResource{Resource: "resource"}, } - c.Invokes(action, nil) + if _, err := c.Invokes(action, nil); err != nil { + return nil, err + } for _, resourceList := range c.Resources { if resourceList.GroupVersion == groupVersion { return resourceList, nil @@ -77,7 +79,9 @@ func (c *FakeDiscovery) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav Verb: "get", Resource: schema.GroupVersionResource{Resource: "resource"}, } - c.Invokes(action, nil) + if _, err = c.Invokes(action, nil); err != nil { + return resultGroups, c.Resources, err + } return resultGroups, c.Resources, nil } @@ -100,7 +104,9 @@ func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { Verb: "get", Resource: schema.GroupVersionResource{Resource: "group"}, } - c.Invokes(action, nil) + if _, err := c.Invokes(action, nil); err != nil { + return nil, err + } groups := map[string]*metav1.APIGroup{} diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go index 4b54859530..326da7cbdf 100644 --- a/vendor/k8s.io/client-go/dynamic/simple.go +++ b/vendor/k8s.io/client-go/dynamic/simple.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net/http" + "time" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,6 
+30,9 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/rest" + "k8s.io/client-go/util/consistencydetector" + "k8s.io/client-go/util/watchlist" + "k8s.io/klog/v2" ) type DynamicClient struct { @@ -293,6 +297,24 @@ func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav } func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil { + klog.Warningf("Failed preparing watchlist options for %v, falling back to the standard LIST semantics, err = %v", c.resource, watchListOptionsErr) + } else if hasWatchListOptionsPrepared { + result, err := c.watchList(ctx, watchListOptions) + if err == nil { + consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("watchlist request for %v", c.resource), c.list, opts, result) + return result, nil + } + klog.Warningf("The watchlist request for %v ended with an error, falling back to the standard LIST semantics, err = %v", c.resource, err) + } + result, err := c.list(ctx, opts) + if err == nil { + consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("list request for %v", c.resource), c.list, opts, result) + } + return result, err +} + +func (c *dynamicResourceClient) list(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { if err := validateNamespaceWithOptionalName(c.namespace); err != nil { return nil, err } @@ -319,6 +341,27 @@ func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOption return list, nil } +// watchList establishes a watch stream with the server and returns an unstructured list. +func (c *dynamicResourceClient) watchList(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + if err := validateNamespaceWithOptionalName(c.namespace); err != nil { + return nil, err + } + + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + + result := &unstructured.UnstructuredList{} + err := c.client.client.Get().AbsPath(c.makeURLSegments("")...). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Timeout(timeout). + WatchList(ctx). + Into(result) + + return result, err +} + func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { opts.Watch = true if err := validateNamespaceWithOptionalName(c.namespace); err != nil { diff --git a/vendor/k8s.io/client-go/features/envvar.go b/vendor/k8s.io/client-go/features/envvar.go index f9edfdf0d9..8c3f887dc4 100644 --- a/vendor/k8s.io/client-go/features/envvar.go +++ b/vendor/k8s.io/client-go/features/envvar.go @@ -47,6 +47,10 @@ var _ Gates = &envVarFeatureGates{} // // Please note that environmental variables can only be set to the boolean value. // Incorrect values will be ignored and logged. +// +// Features can also be set directly via the Set method. +// In that case, these features take precedence over +// features set via environmental variables. 
func newEnvVarFeatureGates(features map[Feature]FeatureSpec) *envVarFeatureGates { known := map[Feature]FeatureSpec{} for name, spec := range features { @@ -57,7 +61,8 @@ func newEnvVarFeatureGates(features map[Feature]FeatureSpec) *envVarFeatureGates callSiteName: naming.GetNameFromCallsite(internalPackages...), known: known, } - fg.enabled.Store(map[Feature]bool{}) + fg.enabledViaEnvVar.Store(map[Feature]bool{}) + fg.enabledViaSetMethod = map[Feature]bool{} return fg } @@ -74,17 +79,34 @@ type envVarFeatureGates struct { // known holds known feature gates known map[Feature]FeatureSpec - // enabled holds a map[Feature]bool + // enabledViaEnvVar holds a map[Feature]bool // with values explicitly set via env var - enabled atomic.Value + enabledViaEnvVar atomic.Value + + // lockEnabledViaSetMethod protects enabledViaSetMethod + lockEnabledViaSetMethod sync.RWMutex + + // enabledViaSetMethod holds values explicitly set + // via Set method, features stored in this map take + // precedence over features stored in enabledViaEnvVar + enabledViaSetMethod map[Feature]bool // readEnvVars holds the boolean value which // indicates whether readEnvVarsOnce has been called. readEnvVars atomic.Bool } -// Enabled returns true if the key is enabled. If the key is not known, this call will panic. +// Enabled returns true if the key is enabled. If the key is not known, this call will panic. func (f *envVarFeatureGates) Enabled(key Feature) bool { + if v, ok := f.wasFeatureEnabledViaSetMethod(key); ok { + // ensue that the state of all known features + // is loaded from environment variables + // on the first call to Enabled method. + if !f.hasAlreadyReadEnvVar() { + _ = f.getEnabledMapFromEnvVar() + } + return v + } if v, ok := f.getEnabledMapFromEnvVar()[key]; ok { return v } @@ -94,6 +116,26 @@ func (f *envVarFeatureGates) Enabled(key Feature) bool { panic(fmt.Errorf("feature %q is not registered in FeatureGates %q", key, f.callSiteName)) } +// Set sets the given feature to the given value. +// +// Features set via this method take precedence over +// the features set via environment variables. +func (f *envVarFeatureGates) Set(featureName Feature, featureValue bool) error { + feature, ok := f.known[featureName] + if !ok { + return fmt.Errorf("feature %q is not registered in FeatureGates %q", featureName, f.callSiteName) + } + if feature.LockToDefault && feature.Default != featureValue { + return fmt.Errorf("cannot set feature gate %q to %v, feature is locked to %v", featureName, featureValue, feature.Default) + } + + f.lockEnabledViaSetMethod.Lock() + defer f.lockEnabledViaSetMethod.Unlock() + f.enabledViaSetMethod[featureName] = featureValue + + return nil +} + // getEnabledMapFromEnvVar will fill the enabled map on the first call. // This is the only time a known feature can be set to a value // read from the corresponding environmental variable. 
@@ -119,7 +161,7 @@ func (f *envVarFeatureGates) getEnabledMapFromEnvVar() map[Feature]bool { featureGatesState[feature] = boolVal } } - f.enabled.Store(featureGatesState) + f.enabledViaEnvVar.Store(featureGatesState) f.readEnvVars.Store(true) for feature, featureSpec := range f.known { @@ -130,7 +172,15 @@ func (f *envVarFeatureGates) getEnabledMapFromEnvVar() map[Feature]bool { klog.V(1).InfoS("Feature gate default state", "feature", feature, "enabled", featureSpec.Default) } }) - return f.enabled.Load().(map[Feature]bool) + return f.enabledViaEnvVar.Load().(map[Feature]bool) +} + +func (f *envVarFeatureGates) wasFeatureEnabledViaSetMethod(key Feature) (bool, bool) { + f.lockEnabledViaSetMethod.RLock() + defer f.lockEnabledViaSetMethod.RUnlock() + + value, found := f.enabledViaSetMethod[key] + return value, found } func (f *envVarFeatureGates) hasAlreadyReadEnvVar() bool { diff --git a/vendor/k8s.io/client-go/gentype/type.go b/vendor/k8s.io/client-go/gentype/type.go new file mode 100644 index 0000000000..b5be84318d --- /dev/null +++ b/vendor/k8s.io/client-go/gentype/type.go @@ -0,0 +1,360 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gentype + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + "k8s.io/client-go/util/consistencydetector" + "k8s.io/client-go/util/watchlist" + "k8s.io/klog/v2" +) + +// objectWithMeta matches objects implementing both runtime.Object and metav1.Object. +type objectWithMeta interface { + runtime.Object + metav1.Object +} + +// namedObject matches comparable objects implementing GetName(); it is intended for use with apply declarative configurations. +type namedObject interface { + comparable + GetName() *string +} + +// Client represents a client, optionally namespaced, with no support for lists or apply declarative configurations. +type Client[T objectWithMeta] struct { + resource string + client rest.Interface + namespace string // "" for non-namespaced clients + newObject func() T + parameterCodec runtime.ParameterCodec +} + +// ClientWithList represents a client with support for lists. +type ClientWithList[T objectWithMeta, L runtime.Object] struct { + *Client[T] + alsoLister[T, L] +} + +// ClientWithApply represents a client with support for apply declarative configurations. +type ClientWithApply[T objectWithMeta, C namedObject] struct { + *Client[T] + alsoApplier[T, C] +} + +// ClientWithListAndApply represents a client with support for lists and apply declarative configurations. 
+type ClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject] struct { + *Client[T] + alsoLister[T, L] + alsoApplier[T, C] +} + +// Helper types for composition +type alsoLister[T objectWithMeta, L runtime.Object] struct { + client *Client[T] + newList func() L +} + +type alsoApplier[T objectWithMeta, C namedObject] struct { + client *Client[T] +} + +// NewClient constructs a client, namespaced or not, with no support for lists or apply. +// Non-namespaced clients are constructed by passing an empty namespace (""). +func NewClient[T objectWithMeta]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, +) *Client[T] { + return &Client[T]{ + resource: resource, + client: client, + parameterCodec: parameterCodec, + namespace: namespace, + newObject: emptyObjectCreator, + } +} + +// NewClientWithList constructs a namespaced client with support for lists. +func NewClientWithList[T objectWithMeta, L runtime.Object]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + emptyListCreator func() L, +) *ClientWithList[T, L] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + return &ClientWithList[T, L]{ + typeClient, + alsoLister[T, L]{typeClient, emptyListCreator}, + } +} + +// NewClientWithApply constructs a namespaced client with support for apply declarative configurations. +func NewClientWithApply[T objectWithMeta, C namedObject]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, +) *ClientWithApply[T, C] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + return &ClientWithApply[T, C]{ + typeClient, + alsoApplier[T, C]{typeClient}, + } +} + +// NewClientWithListAndApply constructs a client with support for lists and applying declarative configurations. +func NewClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + emptyListCreator func() L, +) *ClientWithListAndApply[T, L, C] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + return &ClientWithListAndApply[T, L, C]{ + typeClient, + alsoLister[T, L]{typeClient, emptyListCreator}, + alsoApplier[T, C]{typeClient}, + } +} + +// GetClient returns the REST interface. +func (c *Client[T]) GetClient() rest.Interface { + return c.client +} + +// GetNamespace returns the client's namespace, if any. +func (c *Client[T]) GetNamespace() string { + return c.namespace +} + +// Get takes name of the resource, and returns the corresponding object, and an error if there is any. +func (c *Client[T]) Get(ctx context.Context, name string, options metav1.GetOptions) (T, error) { + result := c.newObject() + err := c.client.Get(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + VersionedParams(&options, c.parameterCodec). + Do(ctx). + Into(result) + return result, err +} + +// List takes label and field selectors, and returns the list of resources that match those selectors. 
+func (l *alsoLister[T, L]) List(ctx context.Context, opts metav1.ListOptions) (L, error) { + if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil { + klog.Warningf("Failed preparing watchlist options for $.type|resource$, falling back to the standard LIST semantics, err = %v", watchListOptionsErr) + } else if hasWatchListOptionsPrepared { + result, err := l.watchList(ctx, watchListOptions) + if err == nil { + consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, "watchlist request for "+l.client.resource, l.list, opts, result) + return result, nil + } + klog.Warningf("The watchlist request for %s ended with an error, falling back to the standard LIST semantics, err = %v", l.client.resource, err) + } + result, err := l.list(ctx, opts) + if err == nil { + consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, "list request for "+l.client.resource, l.list, opts, result) + } + return result, err +} + +func (l *alsoLister[T, L]) list(ctx context.Context, opts metav1.ListOptions) (L, error) { + list := l.newList() + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + err := l.client.client.Get(). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&opts, l.client.parameterCodec). + Timeout(timeout). + Do(ctx). + Into(list) + return list, err +} + +// watchList establishes a watch stream with the server and returns the list of resources. +func (l *alsoLister[T, L]) watchList(ctx context.Context, opts metav1.ListOptions) (result L, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = l.newList() + err = l.client.client.Get(). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&opts, l.client.parameterCodec). + Timeout(timeout). + WatchList(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resources. +func (c *Client[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + VersionedParams(&opts, c.parameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resource and creates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) Create(ctx context.Context, obj T, opts metav1.CreateOptions) (T, error) { + result := c.newObject() + err := c.client.Post(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// Update takes the representation of a resource and updates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) Update(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) { + result := c.newObject() + err := c.client.Put(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(obj.GetName()). 
+ VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// UpdateStatus updates the status subresource of a resource. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) UpdateStatus(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) { + result := c.newObject() + err := c.client.Put(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(obj.GetName()). + SubResource("status"). + VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// Delete takes name of the resource and deletes it. Returns an error if one occurs. +func (c *Client[T]) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (l *alsoLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return l.client.client.Delete(). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&listOpts, l.client.parameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resource. +func (c *Client[T]) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (T, error) { + result := c.newObject() + err := c.client.Patch(pt). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, c.parameterCodec). + Body(data). + Do(ctx). + Into(result) + return result, err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied resource. +func (a *alsoApplier[T, C]) Apply(ctx context.Context, obj C, opts metav1.ApplyOptions) (T, error) { + result := a.client.newObject() + if obj == *new(C) { + return *new(T), fmt.Errorf("object provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(obj) + if err != nil { + return *new(T), err + } + if obj.GetName() == nil { + return *new(T), fmt.Errorf("obj.Name must be provided to Apply") + } + err = a.client.client.Patch(types.ApplyPatchType). + NamespaceIfScoped(a.client.namespace, a.client.namespace != ""). + Resource(a.client.resource). + Name(*obj.GetName()). + VersionedParams(&patchOpts, a.client.parameterCodec). + Body(data). + Do(ctx). + Into(result) + return result, err +} + +// Apply takes the given apply declarative configuration, applies it to the status subresource and returns the applied resource. +func (a *alsoApplier[T, C]) ApplyStatus(ctx context.Context, obj C, opts metav1.ApplyOptions) (T, error) { + if obj == *new(C) { + return *new(T), fmt.Errorf("object provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(obj) + if err != nil { + return *new(T), err + } + + if obj.GetName() == nil { + return *new(T), fmt.Errorf("obj.Name must be provided to Apply") + } + + result := a.client.newObject() + err = a.client.client.Patch(types.ApplyPatchType). 
+ NamespaceIfScoped(a.client.namespace, a.client.namespace != ""). + Resource(a.client.resource). + Name(*obj.GetName()). + SubResource("status"). + VersionedParams(&patchOpts, a.client.parameterCodec). + Body(data). + Do(ctx). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/client-go/informers/coordination/interface.go b/vendor/k8s.io/client-go/informers/coordination/interface.go index 54cfd7b9f2..026b4d9476 100644 --- a/vendor/k8s.io/client-go/informers/coordination/interface.go +++ b/vendor/k8s.io/client-go/informers/coordination/interface.go @@ -20,6 +20,7 @@ package coordination import ( v1 "k8s.io/client-go/informers/coordination/v1" + v1alpha1 "k8s.io/client-go/informers/coordination/v1alpha1" v1beta1 "k8s.io/client-go/informers/coordination/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -28,6 +29,8 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -48,6 +51,11 @@ func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go new file mode 100644 index 0000000000..4058af2806 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // LeaseCandidates returns a LeaseCandidateInformer. + LeaseCandidates() LeaseCandidateInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// LeaseCandidates returns a LeaseCandidateInformer. 
+func (v *version) LeaseCandidates() LeaseCandidateInformer { + return &leaseCandidateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go new file mode 100644 index 0000000000..21bc47a8e6 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/coordination/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// LeaseCandidateInformer provides access to a shared informer and lister for +// LeaseCandidates. +type LeaseCandidateInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.LeaseCandidateLister +} + +type leaseCandidateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewLeaseCandidateInformer constructs a new informer for LeaseCandidate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewLeaseCandidateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredLeaseCandidateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredLeaseCandidateInformer constructs a new informer for LeaseCandidate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredLeaseCandidateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoordinationV1alpha1().LeaseCandidates(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoordinationV1alpha1().LeaseCandidates(namespace).Watch(context.TODO(), options) + }, + }, + &coordinationv1alpha1.LeaseCandidate{}, + resyncPeriod, + indexers, + ) +} + +func (f *leaseCandidateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredLeaseCandidateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *leaseCandidateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&coordinationv1alpha1.LeaseCandidate{}, f.defaultInformer) +} + +func (f *leaseCandidateInformer) Lister() v1alpha1.LeaseCandidateLister { + return v1alpha1.NewLeaseCandidateLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index f2fef0e0b2..86c24551ef 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -247,6 +247,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. 
At that point no new diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index d85117587c..39a9d3bf4f 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -38,6 +38,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -60,7 +61,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -198,6 +199,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case coordinationv1.SchemeGroupVersion.WithResource("leases"): return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1().Leases().Informer()}, nil + // Group=coordination.k8s.io, Version=v1alpha1 + case coordinationv1alpha1.SchemeGroupVersion.WithResource("leasecandidates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1alpha1().LeaseCandidates().Informer()}, nil + // Group=coordination.k8s.io, Version=v1beta1 case coordinationv1beta1.SchemeGroupVersion.WithResource("leases"): return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1beta1().Leases().Informer()}, nil @@ -307,10 +312,14 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ServiceCIDRs().Informer()}, nil // Group=networking.k8s.io, Version=v1beta1 + case networkingv1beta1.SchemeGroupVersion.WithResource("ipaddresses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().IPAddresses().Informer()}, nil case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil case networkingv1beta1.SchemeGroupVersion.WithResource("ingressclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().IngressClasses().Informer()}, nil + case networkingv1beta1.SchemeGroupVersion.WithResource("servicecidrs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().ServiceCIDRs().Informer()}, nil // Group=node.k8s.io, Version=v1 case nodev1.SchemeGroupVersion.WithResource("runtimeclasses"): @@ -362,21 +371,17 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case rbacv1beta1.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil - // Group=resource.k8s.io, Version=v1alpha2 - case v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().PodSchedulingContexts().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclaims"): - 
return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaims().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclaimparameters"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaimParameters().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaimTemplates().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclasses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClasses().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclassparameters"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClassParameters().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceslices"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceSlices().Informer()}, nil + // Group=resource.k8s.io, Version=v1alpha3 + case v1alpha3.SchemeGroupVersion.WithResource("deviceclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().DeviceClasses().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("podschedulingcontexts"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().PodSchedulingContexts().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("resourceclaims"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceClaims().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("resourceclaimtemplates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceClaimTemplates().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("resourceslices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceSlices().Informer()}, nil // Group=scheduling.k8s.io, Version=v1 case schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"): @@ -421,6 +426,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().StorageClasses().Informer()}, nil case storagev1beta1.SchemeGroupVersion.WithResource("volumeattachments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().VolumeAttachments().Informer()}, nil + case storagev1beta1.SchemeGroupVersion.WithResource("volumeattributesclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().VolumeAttributesClasses().Informer()}, nil // Group=storagemigration.k8s.io, Version=v1alpha1 case storagemigrationv1alpha1.SchemeGroupVersion.WithResource("storageversionmigrations"): diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go index 2dcc3129a5..974a8fd5bf 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go @@ -24,10 +24,14 @@ import ( // Interface provides access to all the informers in this group version. 
type Interface interface { + // IPAddresses returns a IPAddressInformer. + IPAddresses() IPAddressInformer // Ingresses returns a IngressInformer. Ingresses() IngressInformer // IngressClasses returns a IngressClassInformer. IngressClasses() IngressClassInformer + // ServiceCIDRs returns a ServiceCIDRInformer. + ServiceCIDRs() ServiceCIDRInformer } type version struct { @@ -41,6 +45,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// IPAddresses returns a IPAddressInformer. +func (v *version) IPAddresses() IPAddressInformer { + return &iPAddressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // Ingresses returns a IngressInformer. func (v *version) Ingresses() IngressInformer { return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} @@ -50,3 +59,8 @@ func (v *version) Ingresses() IngressInformer { func (v *version) IngressClasses() IngressClassInformer { return &ingressClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// ServiceCIDRs returns a ServiceCIDRInformer. +func (v *version) ServiceCIDRs() ServiceCIDRInformer { + return &serviceCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go new file mode 100644 index 0000000000..2a2dfa2907 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/networking/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// IPAddressInformer provides access to a shared informer and lister for +// IPAddresses. +type IPAddressInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.IPAddressLister +} + +type iPAddressInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewIPAddressInformer constructs a new informer for IPAddress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIPAddressInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredIPAddressInformer constructs a new informer for IPAddress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().IPAddresses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().IPAddresses().Watch(context.TODO(), options) + }, + }, + &networkingv1beta1.IPAddress{}, + resyncPeriod, + indexers, + ) +} + +func (f *iPAddressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIPAddressInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *iPAddressInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1beta1.IPAddress{}, f.defaultInformer) +} + +func (f *iPAddressInformer) Lister() v1beta1.IPAddressLister { + return v1beta1.NewIPAddressLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go similarity index 52% rename from vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go rename to vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go index cb76d78fe4..d5a9ce0146 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go @@ -16,74 +16,74 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha2 +package v1beta1 import ( "context" time "time" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + networkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + v1beta1 "k8s.io/client-go/listers/networking/v1beta1" cache "k8s.io/client-go/tools/cache" ) -// ResourceClassInformer provides access to a shared informer and lister for -// ResourceClasses. -type ResourceClassInformer interface { +// ServiceCIDRInformer provides access to a shared informer and lister for +// ServiceCIDRs. 
+type ServiceCIDRInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClassLister + Lister() v1beta1.ServiceCIDRLister } -type resourceClassInformer struct { +type serviceCIDRInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc } -// NewResourceClassInformer constructs a new informer for ResourceClass type. +// NewServiceCIDRInformer constructs a new informer for ServiceCIDR type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewResourceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredResourceClassInformer(client, resyncPeriod, indexers, nil) +func NewServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceCIDRInformer(client, resyncPeriod, indexers, nil) } -// NewFilteredResourceClassInformer constructs a new informer for ResourceClass type. +// NewFilteredServiceCIDRInformer constructs a new informer for ServiceCIDR type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewFilteredResourceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClasses().List(context.TODO(), options) + return client.NetworkingV1beta1().ServiceCIDRs().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClasses().Watch(context.TODO(), options) + return client.NetworkingV1beta1().ServiceCIDRs().Watch(context.TODO(), options) }, }, - &resourcev1alpha2.ResourceClass{}, + &networkingv1beta1.ServiceCIDR{}, resyncPeriod, indexers, ) } -func (f *resourceClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredResourceClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *serviceCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *resourceClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClass{}, f.defaultInformer) +func (f *serviceCIDRInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1beta1.ServiceCIDR{}, f.defaultInformer) } -func (f *resourceClassInformer) Lister() 
v1alpha2.ResourceClassLister { - return v1alpha2.NewResourceClassLister(f.Informer().GetIndexer()) +func (f *serviceCIDRInformer) Lister() v1beta1.ServiceCIDRLister { + return v1beta1.NewServiceCIDRLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/interface.go b/vendor/k8s.io/client-go/informers/resource/interface.go index 3fcce8ae9d..170d29d808 100644 --- a/vendor/k8s.io/client-go/informers/resource/interface.go +++ b/vendor/k8s.io/client-go/informers/resource/interface.go @@ -20,13 +20,13 @@ package resource import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - v1alpha2 "k8s.io/client-go/informers/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/informers/resource/v1alpha3" ) // Interface provides access to each of this group's versions. type Interface interface { - // V1alpha2 provides access to shared informers for resources in V1alpha2. - V1alpha2() v1alpha2.Interface + // V1alpha3 provides access to shared informers for resources in V1alpha3. + V1alpha3() v1alpha3.Interface } type group struct { @@ -40,7 +40,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// V1alpha2 returns a new v1alpha2.Interface. -func (g *group) V1alpha2() v1alpha2.Interface { - return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) +// V1alpha3 returns a new v1alpha3.Interface. +func (g *group) V1alpha3() v1alpha3.Interface { + return v1alpha3.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimparameters.go b/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimparameters.go deleted file mode 100644 index 3064ac9f55..0000000000 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimparameters.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - time "time" - - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" - cache "k8s.io/client-go/tools/cache" -) - -// ResourceClaimParametersInformer provides access to a shared informer and lister for -// ResourceClaimParameters. -type ResourceClaimParametersInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClaimParametersLister -} - -type resourceClaimParametersInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewResourceClaimParametersInformer constructs a new informer for ResourceClaimParameters type. 
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewResourceClaimParametersInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredResourceClaimParametersInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredResourceClaimParametersInformer constructs a new informer for ResourceClaimParameters type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredResourceClaimParametersInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClaimParameters(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClaimParameters(namespace).Watch(context.TODO(), options) - }, - }, - &resourcev1alpha2.ResourceClaimParameters{}, - resyncPeriod, - indexers, - ) -} - -func (f *resourceClaimParametersInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredResourceClaimParametersInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *resourceClaimParametersInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClaimParameters{}, f.defaultInformer) -} - -func (f *resourceClaimParametersInformer) Lister() v1alpha2.ResourceClaimParametersLister { - return v1alpha2.NewResourceClaimParametersLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclassparameters.go b/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclassparameters.go deleted file mode 100644 index 71fbefe162..0000000000 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclassparameters.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1alpha2 - -import ( - "context" - time "time" - - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" - cache "k8s.io/client-go/tools/cache" -) - -// ResourceClassParametersInformer provides access to a shared informer and lister for -// ResourceClassParameters. -type ResourceClassParametersInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClassParametersLister -} - -type resourceClassParametersInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewResourceClassParametersInformer constructs a new informer for ResourceClassParameters type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewResourceClassParametersInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredResourceClassParametersInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredResourceClassParametersInformer constructs a new informer for ResourceClassParameters type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredResourceClassParametersInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClassParameters(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClassParameters(namespace).Watch(context.TODO(), options) - }, - }, - &resourcev1alpha2.ResourceClassParameters{}, - resyncPeriod, - indexers, - ) -} - -func (f *resourceClassParametersInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredResourceClassParametersInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *resourceClassParametersInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClassParameters{}, f.defaultInformer) -} - -func (f *resourceClassParametersInformer) Lister() v1alpha2.ResourceClassParametersLister { - return v1alpha2.NewResourceClassParametersLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go new file mode 100644 index 0000000000..c0bcbd1905 --- /dev/null +++ 
b/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + time "time" + + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" + cache "k8s.io/client-go/tools/cache" +) + +// DeviceClassInformer provides access to a shared informer and lister for +// DeviceClasses. +type DeviceClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha3.DeviceClassLister +} + +type deviceClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewDeviceClassInformer constructs a new informer for DeviceClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeviceClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredDeviceClassInformer constructs a new informer for DeviceClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().DeviceClasses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().DeviceClasses().Watch(context.TODO(), options) + }, + }, + &resourcev1alpha3.DeviceClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *deviceClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeviceClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deviceClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&resourcev1alpha3.DeviceClass{}, f.defaultInformer) +} + +func (f *deviceClassInformer) Lister() v1alpha3.DeviceClassLister { + return v1alpha3.NewDeviceClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go similarity index 70% rename from vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go rename to vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go index aa4a5ae7dc..481a7de451 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" @@ -24,18 +24,14 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // DeviceClasses returns a DeviceClassInformer. + DeviceClasses() DeviceClassInformer // PodSchedulingContexts returns a PodSchedulingContextInformer. PodSchedulingContexts() PodSchedulingContextInformer // ResourceClaims returns a ResourceClaimInformer. ResourceClaims() ResourceClaimInformer - // ResourceClaimParameters returns a ResourceClaimParametersInformer. - ResourceClaimParameters() ResourceClaimParametersInformer // ResourceClaimTemplates returns a ResourceClaimTemplateInformer. ResourceClaimTemplates() ResourceClaimTemplateInformer - // ResourceClasses returns a ResourceClassInformer. - ResourceClasses() ResourceClassInformer - // ResourceClassParameters returns a ResourceClassParametersInformer. - ResourceClassParameters() ResourceClassParametersInformer // ResourceSlices returns a ResourceSliceInformer. ResourceSlices() ResourceSliceInformer } @@ -51,6 +47,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// DeviceClasses returns a DeviceClassInformer. +func (v *version) DeviceClasses() DeviceClassInformer { + return &deviceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // PodSchedulingContexts returns a PodSchedulingContextInformer. 
func (v *version) PodSchedulingContexts() PodSchedulingContextInformer { return &podSchedulingContextInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} @@ -61,26 +62,11 @@ func (v *version) ResourceClaims() ResourceClaimInformer { return &resourceClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } -// ResourceClaimParameters returns a ResourceClaimParametersInformer. -func (v *version) ResourceClaimParameters() ResourceClaimParametersInformer { - return &resourceClaimParametersInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - // ResourceClaimTemplates returns a ResourceClaimTemplateInformer. func (v *version) ResourceClaimTemplates() ResourceClaimTemplateInformer { return &resourceClaimTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } -// ResourceClasses returns a ResourceClassInformer. -func (v *version) ResourceClasses() ResourceClassInformer { - return &resourceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - -// ResourceClassParameters returns a ResourceClassParametersInformer. -func (v *version) ResourceClassParameters() ResourceClassParametersInformer { - return &resourceClassParametersInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - // ResourceSlices returns a ResourceSliceInformer. func (v *version) ResourceSlices() ResourceSliceInformer { return &resourceSliceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go similarity index 86% rename from vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go rename to vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go index b4aabb3761..62fb3614fc 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( "context" time "time" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PodSchedulingContexts. 
type PodSchedulingContextInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha2.PodSchedulingContextLister + Lister() v1alpha3.PodSchedulingContextLister } type podSchedulingContextInformer struct { @@ -62,16 +62,16 @@ func NewFilteredPodSchedulingContextInformer(client kubernetes.Interface, namesp if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().PodSchedulingContexts(namespace).List(context.TODO(), options) + return client.ResourceV1alpha3().PodSchedulingContexts(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().PodSchedulingContexts(namespace).Watch(context.TODO(), options) + return client.ResourceV1alpha3().PodSchedulingContexts(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha2.PodSchedulingContext{}, + &resourcev1alpha3.PodSchedulingContext{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *podSchedulingContextInformer) defaultInformer(client kubernetes.Interfa } func (f *podSchedulingContextInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.PodSchedulingContext{}, f.defaultInformer) + return f.factory.InformerFor(&resourcev1alpha3.PodSchedulingContext{}, f.defaultInformer) } -func (f *podSchedulingContextInformer) Lister() v1alpha2.PodSchedulingContextLister { - return v1alpha2.NewPodSchedulingContextLister(f.Informer().GetIndexer()) +func (f *podSchedulingContextInformer) Lister() v1alpha3.PodSchedulingContextLister { + return v1alpha3.NewPodSchedulingContextLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go similarity index 85% rename from vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go rename to vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go index 3af9368919..fa644579b1 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( "context" time "time" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceClaims. 
type ResourceClaimInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClaimLister + Lister() v1alpha3.ResourceClaimLister } type resourceClaimInformer struct { @@ -62,16 +62,16 @@ func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClaims(namespace).List(context.TODO(), options) + return client.ResourceV1alpha3().ResourceClaims(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClaims(namespace).Watch(context.TODO(), options) + return client.ResourceV1alpha3().ResourceClaims(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha2.ResourceClaim{}, + &resourcev1alpha3.ResourceClaim{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *resourceClaimInformer) defaultInformer(client kubernetes.Interface, res } func (f *resourceClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClaim{}, f.defaultInformer) + return f.factory.InformerFor(&resourcev1alpha3.ResourceClaim{}, f.defaultInformer) } -func (f *resourceClaimInformer) Lister() v1alpha2.ResourceClaimLister { - return v1alpha2.NewResourceClaimLister(f.Informer().GetIndexer()) +func (f *resourceClaimInformer) Lister() v1alpha3.ResourceClaimLister { + return v1alpha3.NewResourceClaimLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go similarity index 86% rename from vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go index 13f4ad835c..294755661c 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( "context" time "time" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceClaimTemplates. 
type ResourceClaimTemplateInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClaimTemplateLister + Lister() v1alpha3.ResourceClaimTemplateLister } type resourceClaimTemplateInformer struct { @@ -62,16 +62,16 @@ func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, names if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).List(context.TODO(), options) + return client.ResourceV1alpha3().ResourceClaimTemplates(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) + return client.ResourceV1alpha3().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha2.ResourceClaimTemplate{}, + &resourcev1alpha3.ResourceClaimTemplate{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *resourceClaimTemplateInformer) defaultInformer(client kubernetes.Interf } func (f *resourceClaimTemplateInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClaimTemplate{}, f.defaultInformer) + return f.factory.InformerFor(&resourcev1alpha3.ResourceClaimTemplate{}, f.defaultInformer) } -func (f *resourceClaimTemplateInformer) Lister() v1alpha2.ResourceClaimTemplateLister { - return v1alpha2.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) +func (f *resourceClaimTemplateInformer) Lister() v1alpha3.ResourceClaimTemplateLister { + return v1alpha3.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceslice.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go similarity index 85% rename from vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceslice.go rename to vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go index da9d2a0243..108083530c 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceslice.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( "context" time "time" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceSlices. 
type ResourceSliceInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceSliceLister + Lister() v1alpha3.ResourceSliceLister } type resourceSliceInformer struct { @@ -61,16 +61,16 @@ func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceSlices().List(context.TODO(), options) + return client.ResourceV1alpha3().ResourceSlices().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceSlices().Watch(context.TODO(), options) + return client.ResourceV1alpha3().ResourceSlices().Watch(context.TODO(), options) }, }, - &resourcev1alpha2.ResourceSlice{}, + &resourcev1alpha3.ResourceSlice{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *resourceSliceInformer) defaultInformer(client kubernetes.Interface, res } func (f *resourceSliceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceSlice{}, f.defaultInformer) + return f.factory.InformerFor(&resourcev1alpha3.ResourceSlice{}, f.defaultInformer) } -func (f *resourceSliceInformer) Lister() v1alpha2.ResourceSliceLister { - return v1alpha2.NewResourceSliceLister(f.Informer().GetIndexer()) +func (f *resourceSliceInformer) Lister() v1alpha3.ResourceSliceLister { + return v1alpha3.NewResourceSliceLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go index 77b77c08ee..7433951855 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go @@ -34,6 +34,8 @@ type Interface interface { StorageClasses() StorageClassInformer // VolumeAttachments returns a VolumeAttachmentInformer. VolumeAttachments() VolumeAttachmentInformer + // VolumeAttributesClasses returns a VolumeAttributesClassInformer. + VolumeAttributesClasses() VolumeAttributesClassInformer } type version struct { @@ -71,3 +73,8 @@ func (v *version) StorageClasses() StorageClassInformer { func (v *version) VolumeAttachments() VolumeAttachmentInformer { return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// VolumeAttributesClasses returns a VolumeAttributesClassInformer. +func (v *version) VolumeAttributesClasses() VolumeAttributesClassInformer { + return &volumeAttributesClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 0000000000..ede90ce43c --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + storagev1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeAttributesClassInformer provides access to a shared informer and lister for +// VolumeAttributesClasses. +type VolumeAttributesClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.VolumeAttributesClassLister +} + +type volumeAttributesClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
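The generated constructor comments above recommend going through a shared informer factory rather than the standalone New*Informer helpers. A sketch of that factory path for the newly added storage/v1beta1 VolumeAttributesClass informer, assuming the usual clientset wiring; the resync period, event handler body, and kubeconfig source are arbitrary choices for illustration:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// One factory shares caches and watch connections across all informers.
	factory := informers.NewSharedInformerFactory(cs, 10*time.Minute)
	vac := factory.Storage().V1beta1().VolumeAttributesClasses()

	// Register handlers before Start() so no initial events are missed.
	vac.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Printf("saw VolumeAttributesClass: %v\n", obj) },
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Lister reads come from the shared index, not the API server.
	classes, _ := vac.Lister().List(labels.Everything())
	fmt.Println("cached VolumeAttributesClasses:", len(classes))
}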
+func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().VolumeAttributesClasses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().VolumeAttributesClasses().Watch(context.TODO(), options) + }, + }, + &storagev1beta1.VolumeAttributesClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeAttributesClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeAttributesClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storagev1beta1.VolumeAttributesClass{}, f.defaultInformer) +} + +func (f *volumeAttributesClassInformer) Lister() v1beta1.VolumeAttributesClassLister { + return v1beta1.NewVolumeAttributesClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index eaa206ff65..9cddb0bbed 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -45,6 +45,7 @@ import ( certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" + coordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" discoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1" @@ -67,7 +68,7 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" + resourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" @@ -102,6 +103,7 @@ type Interface interface { CertificatesV1() certificatesv1.CertificatesV1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface + CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface @@ -125,7 +127,7 @@ type Interface interface { RbacV1() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface - ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface + ResourceV1alpha3() 
resourcev1alpha3.ResourceV1alpha3Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface @@ -159,6 +161,7 @@ type Clientset struct { certificatesV1 *certificatesv1.CertificatesV1Client certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client + coordinationV1alpha1 *coordinationv1alpha1.CoordinationV1alpha1Client coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client coordinationV1 *coordinationv1.CoordinationV1Client coreV1 *corev1.CoreV1Client @@ -182,7 +185,7 @@ type Clientset struct { rbacV1 *rbacv1.RbacV1Client rbacV1beta1 *rbacv1beta1.RbacV1beta1Client rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - resourceV1alpha2 *resourcev1alpha2.ResourceV1alpha2Client + resourceV1alpha3 *resourcev1alpha3.ResourceV1alpha3Client schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client schedulingV1 *schedulingv1.SchedulingV1Client @@ -297,6 +300,11 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al return c.certificatesV1alpha1 } +// CoordinationV1alpha1 retrieves the CoordinationV1alpha1Client +func (c *Clientset) CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface { + return c.coordinationV1alpha1 +} + // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 @@ -412,9 +420,9 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return c.rbacV1alpha1 } -// ResourceV1alpha2 retrieves the ResourceV1alpha2Client -func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { - return c.resourceV1alpha2 +// ResourceV1alpha3 retrieves the ResourceV1alpha3Client +func (c *Clientset) ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface { + return c.resourceV1alpha3 } // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client @@ -580,6 +588,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.coordinationV1alpha1, err = coordinationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -672,7 +684,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.resourceV1alpha2, err = resourcev1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.resourceV1alpha3, err = resourcev1alpha3.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -746,6 +758,7 @@ func New(c rest.Interface) *Clientset { cs.certificatesV1 = certificatesv1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) cs.certificatesV1alpha1 = certificatesv1alpha1.New(c) + cs.coordinationV1alpha1 = coordinationv1alpha1.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) @@ -769,7 +782,7 @@ func New(c rest.Interface) *Clientset { cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) - cs.resourceV1alpha2 = resourcev1alpha2.New(c) + 
cs.resourceV1alpha3 = resourcev1alpha3.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index a62b8f7c45..132f917abe 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -21,6 +21,7 @@ package fake import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" + applyconfigurations "k8s.io/client-go/applyconfigurations" "k8s.io/client-go/discovery" fakediscovery "k8s.io/client-go/discovery/fake" clientset "k8s.io/client-go/kubernetes" @@ -68,6 +69,8 @@ import ( fakecertificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" fakecoordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1/fake" + coordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" + fakecoordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" fakecoordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -112,8 +115,8 @@ import ( fakerbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" fakerbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake" - resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" - fakeresourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake" + resourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" + fakeresourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" fakeschedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1/fake" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" @@ -133,8 +136,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { @@ -176,6 +183,38 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
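The deprecation notice in this hunk points tests at NewClientset, whose tracker applies field management and so exercises server-side apply far more faithfully than NewSimpleClientset. A sketch of a unit test against the field-managed fake; the ConfigMap apply configuration, namespace, and field-manager name are arbitrary illustrative choices, assuming client-go v0.31-style apply-configuration packages:

package example

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestApplyWithFieldManagedFake(t *testing.T) {
	// NewClientset wires in a field-managed object tracker, so Apply calls
	// track ownership per field manager instead of being treated as blind updates.
	client := fake.NewClientset()

	cm := corev1ac.ConfigMap("settings", "default").
		WithData(map[string]string{"mode": "fast"})

	applied, err := client.CoreV1().ConfigMaps("default").
		Apply(context.Background(), cm, metav1.ApplyOptions{FieldManager: "unit-test"})
	if err != nil {
		t.Fatalf("apply failed: %v", err)
	}
	if applied.Data["mode"] != "fast" {
		t.Fatalf("unexpected data: %v", applied.Data)
	}
}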
+func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfigurations.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + var ( _ clientset.Interface = &Clientset{} _ testing.FakeClient = &Clientset{} @@ -286,6 +325,11 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al return &fakecertificatesv1alpha1.FakeCertificatesV1alpha1{Fake: &c.Fake} } +// CoordinationV1alpha1 retrieves the CoordinationV1alpha1Client +func (c *Clientset) CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface { + return &fakecoordinationv1alpha1.FakeCoordinationV1alpha1{Fake: &c.Fake} +} + // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return &fakecoordinationv1beta1.FakeCoordinationV1beta1{Fake: &c.Fake} @@ -401,9 +445,9 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return &fakerbacv1alpha1.FakeRbacV1alpha1{Fake: &c.Fake} } -// ResourceV1alpha2 retrieves the ResourceV1alpha2Client -func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { - return &fakeresourcev1alpha2.FakeResourceV1alpha2{Fake: &c.Fake} +// ResourceV1alpha3 retrieves the ResourceV1alpha3Client +func (c *Clientset) ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface { + return &fakeresourcev1alpha3.FakeResourceV1alpha3{Fake: &c.Fake} } // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go b/vendor/k8s.io/client-go/kubernetes/fake/register.go index 339983fe0a..157abae5ff 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -41,6 +41,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -63,7 +64,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -103,6 +104,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, certificatesv1alpha1.AddToScheme, + coordinationv1alpha1.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -126,7 +128,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, - resourcev1alpha2.AddToScheme, + 
resourcev1alpha3.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 8ebfb7cea5..5262b0f046 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -41,6 +41,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -63,7 +64,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -103,6 +104,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, certificatesv1alpha1.AddToScheme, + coordinationv1alpha1.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -126,7 +128,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, - resourcev1alpha2.AddToScheme, + resourcev1alpha3.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go index b88598b715..2d371e6fc7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go @@ -43,20 +43,22 @@ var mutatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("Mutating // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(mutatingwebhookconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.MutatingWebhookConfiguration), err } // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { + emptyResult := &v1.MutatingWebhookConfigurationList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1.MutatingWebhookConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts metav // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(mutatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.MutatingWebhookConfiguration), err } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.MutatingWebhookConfiguration), err } @@ -107,7 +111,7 @@ func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name str // DeleteCollection deletes a collection of objects. func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(mutatingwebhookconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.MutatingWebhookConfigurationList{}) return err @@ -115,10 +119,11 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context // Patch applies the patch and returns the patched mutatingWebhookConfiguration. 
func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.MutatingWebhookConfiguration), err } @@ -136,10 +141,11 @@ func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingW if name == nil { return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") } + emptyResult := &v1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.MutatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go index c947e6572f..d6c7bec898 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go @@ -43,20 +43,22 @@ var validatingadmissionpoliciesKind = v1.SchemeGroupVersion.WithKind("Validating // Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpoliciesResource, name), &v1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicy), err } // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyList, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), &v1.ValidatingAdmissionPolicyList{}) + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts metav1. // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpoliciesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts)) } // Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicy), err } // Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicy), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) { +func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1.ValidatingAdmissionPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateSubresourceAction(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy), &v1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicy), err } @@ -118,7 +123,7 @@ func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpoliciesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ValidatingAdmissionPolicyList{}) return err @@ -126,10 +131,11 @@ func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched validatingAdmissionPolicy. func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, name, pt, data, subresources...), &v1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicy), err } @@ -147,10 +153,11 @@ func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingA if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") } + emptyResult := &v1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data), &v1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicy), err } @@ -169,10 +176,11 @@ func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, valid if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") } + emptyResult := &v1.ValidatingAdmissionPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, "status"), &v1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go index 9ace735930..5b6719be0a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go @@ -43,20 +43,22 @@ var validatingadmissionpolicybindingsKind = v1.SchemeGroupVersion.WithKind("Vali // Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpolicybindingsResource, name), &v1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicyBinding), err } // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyBindingList, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyBindingList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), &v1.ValidatingAdmissionPolicyBindingList{}) + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts m // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpolicybindingsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts)) } // Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. 
func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicyBinding), err } // Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicyBinding), err } @@ -107,7 +111,7 @@ func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name // DeleteCollection deletes a collection of objects. func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpolicybindingsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ValidatingAdmissionPolicyBindingList{}) return err @@ -115,10 +119,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Con // Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, name, pt, data, subresources...), &v1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicyBinding), err } @@ -136,10 +141,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, valid if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") } + emptyResult := &v1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data), &v1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingAdmissionPolicyBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go index a6951c736e..ff7fc43013 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go @@ -43,20 +43,22 @@ var validatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("Valida // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(validatingwebhookconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingWebhookConfiguration), err } // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { + emptyResult := &v1.ValidatingWebhookConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1.ValidatingWebhookConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts met // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingWebhookConfiguration), err } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingWebhookConfiguration), err } @@ -107,7 +111,7 @@ func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name s // DeleteCollection deletes a collection of objects. func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingwebhookconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ValidatingWebhookConfigurationList{}) return err @@ -115,10 +119,11 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Conte // Patch applies the patch and returns the patched validatingWebhookConfiguration. func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingWebhookConfiguration), err } @@ -136,10 +141,11 @@ func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validat if name == nil { return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") } + emptyResult := &v1.ValidatingWebhookConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go index edbc826d19..e863766c60 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. @@ -55,143 +52,18 @@ type MutatingWebhookConfigurationInterface interface { // mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface type mutatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration] } // newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatingWebhookConfigurations { return &mutatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration]( + "mutatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.MutatingWebhookConfiguration { return &v1.MutatingWebhookConfiguration{} }, + func() *v1.MutatingWebhookConfigurationList { return &v1.MutatingWebhookConfigurationList{} }), } } - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. 
-func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.MutatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Post(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Put(). - Resource("mutatingwebhookconfigurations"). - Name(mutatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. 
-func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("mutatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("mutatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go index 0b0b05acd4..1b20e69606 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. @@ -43,6 +40,7 @@ type ValidatingAdmissionPoliciesGetter interface { type ValidatingAdmissionPolicyInterface interface { Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicy, error) Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type ValidatingAdmissionPolicyInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface type validatingAdmissionPolicies struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration] } // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies func newValidatingAdmissionPolicies(c *AdmissionregistrationV1Client) *validatingAdmissionPolicies { return &validatingAdmissionPolicies{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ValidatingAdmissionPolicy { return &v1.ValidatingAdmissionPolicy{} }, + func() *v1.ValidatingAdmissionPolicyList { return &v1.ValidatingAdmissionPolicyList{} }), } } - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *validatingAdmissionPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ValidatingAdmissionPolicyList{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. 
-func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Post(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. 
-func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) { - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. -func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go index 83a8ef163d..44694b2329 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. @@ -55,143 +52,18 @@ type ValidatingAdmissionPolicyBindingInterface interface { // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface type validatingAdmissionPolicyBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration] } // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1Client) *validatingAdmissionPolicyBindings { return &validatingAdmissionPolicyBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ValidatingAdmissionPolicyBinding { return &v1.ValidatingAdmissionPolicyBinding{} }, + func() *v1.ValidatingAdmissionPolicyBindingList { return &v1.ValidatingAdmissionPolicyBindingList{} }), } } - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. -func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1.ValidatingAdmissionPolicyBinding{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ValidatingAdmissionPolicyBindingList{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1.ValidatingAdmissionPolicyBinding{} - err = c.client.Post(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1.ValidatingAdmissionPolicyBinding{} - err = c.client.Put(). - Resource("validatingadmissionpolicybindings"). - Name(validatingAdmissionPolicyBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicybindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - result = &v1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicybindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go index 065e3c8341..11b4ac0591 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. @@ -55,143 +52,18 @@ type ValidatingWebhookConfigurationInterface interface { // validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface type validatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration] } // newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *validatingWebhookConfigurations { return &validatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration]( + "validatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ValidatingWebhookConfiguration { return &v1.ValidatingWebhookConfiguration{} }, + func() *v1.ValidatingWebhookConfigurationList { return &v1.ValidatingWebhookConfigurationList{} }), } } - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. 
-func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ValidatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Post(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Put(). - Resource("validatingwebhookconfigurations"). - Name(validatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
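The same pattern applies in validatingwebhookconfiguration.go: the removed Get/List/Watch/Create/Update/Delete/Patch/Apply bodies in this file are now provided by the embedded generic client. For the Apply path in particular, callers keep building an ApplyConfiguration and passing ApplyOptions. A hedged sketch of that call site; the webhook name and field manager below are invented for illustration:

package main

import (
	"context"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyadmissionv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	restCfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(restCfg)
	if err != nil {
		panic(err)
	}

	// Declare only the fields this field manager owns; the server merges the rest.
	applyCfg := applyadmissionv1.ValidatingWebhookConfiguration("demo-webhook").
		WithLabels(map[string]string{"app.kubernetes.io/managed-by": "demo-controller"})

	applied, err := cs.AdmissionregistrationV1().ValidatingWebhookConfigurations().
		Apply(context.TODO(), applyCfg, metav1.ApplyOptions{FieldManager: "demo-controller", Force: true})
	if err != nil {
		panic(err)
	}
	fmt.Println("applied", applied.Name)
}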
-func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("validatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go index f4358ce46c..ef4d843e00 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go @@ -43,20 +43,22 @@ var validatingadmissionpoliciesKind = v1alpha1.SchemeGroupVersion.WithKind("Vali // Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(validatingadmissionpoliciesResource, name), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyList, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), &v1alpha1.ValidatingAdmissionPolicyList{}) + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.List // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpoliciesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts)) } // Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } // Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
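Two behavioural details of the regenerated fakes are worth noting: every verb now goes through a *WithOptions action, so the caller-supplied Get/List/Create/Update/Patch options are carried on the recorded action, and on error the methods return a non-nil empty object (emptyResult) alongside the error instead of a bare nil. Test code should therefore branch on the error, not on the returned pointer. A small test sketch under those assumptions; resource names are illustrative:

package example

import (
	"context"
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestMissingPolicyIsNotFound(t *testing.T) {
	// Empty fake: no ValidatingAdmissionPolicy objects were seeded into the tracker.
	client := fake.NewSimpleClientset()

	got, err := client.AdmissionregistrationV1alpha1().
		ValidatingAdmissionPolicies().
		Get(context.TODO(), "missing", metav1.GetOptions{})
	if !apierrors.IsNotFound(err) {
		t.Fatalf("expected NotFound, got %v", err)
	}

	// With these fakes, got is an empty &ValidatingAdmissionPolicy{} rather than nil,
	// so a nil check is no longer a reliable failure signal.
	_ = got
}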
-func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) { +func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } @@ -118,7 +123,7 @@ func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpoliciesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ValidatingAdmissionPolicyList{}) return err @@ -126,10 +131,11 @@ func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched validatingAdmissionPolicy. func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, name, pt, data, subresources...), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } @@ -147,10 +153,11 @@ func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingA if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") } + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } @@ -169,10 +176,11 @@ func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, valid if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") } + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go index c520655f9d..f7cc966fb1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go @@ -43,20 +43,22 @@ var validatingadmissionpolicybindingsKind = v1alpha1.SchemeGroupVersion.WithKind // Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpolicybindingsResource, name), &v1alpha1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err } // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyBindingList, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBindingList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), &v1alpha1.ValidatingAdmissionPolicyBindingList{}) + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpolicybindingsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts)) } // Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. 
func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1alpha1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err } // Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1alpha1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err } @@ -107,7 +111,7 @@ func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name // DeleteCollection deletes a collection of objects. func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpolicybindingsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ValidatingAdmissionPolicyBindingList{}) return err @@ -115,10 +119,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Con // Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, name, pt, data, subresources...), &v1alpha1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err } @@ -136,10 +141,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, valid if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") } + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data), &v1alpha1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go index 1d994b5abf..c2b7c825cb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. @@ -43,6 +40,7 @@ type ValidatingAdmissionPoliciesGetter interface { type ValidatingAdmissionPolicyInterface interface { Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type ValidatingAdmissionPolicyInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface type validatingAdmissionPolicies struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration] } // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies func newValidatingAdmissionPolicies(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicies { return &validatingAdmissionPolicies{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ValidatingAdmissionPolicy { return &v1alpha1.ValidatingAdmissionPolicy{} }, + func() *v1alpha1.ValidatingAdmissionPolicyList { return &v1alpha1.ValidatingAdmissionPolicyList{} }), } } - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *validatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ValidatingAdmissionPolicyList{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. 
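The generated interface comments above ("Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus()." and the matching ApplyStatus note) refer to client-gen tags placed on the API type itself, not to anything in this vendored code. A hypothetical, non-vendored type showing where those markers live; ExamplePolicy and its Spec/Status are invented for illustration only:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ExamplePolicySpec and ExamplePolicyStatus are stand-ins; a real API type would define fields here.
type ExamplePolicySpec struct{}
type ExamplePolicyStatus struct{}

// ExamplePolicy is a hypothetical cluster-scoped API type. Because it carries a Status
// field, client-gen would emit UpdateStatus/ApplyStatus for it; the +genclient:noStatus
// tag below is what suppresses those methods in the generated typed client.
//
// +genclient
// +genclient:nonNamespaced
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ExamplePolicy struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ExamplePolicySpec   `json:"spec,omitempty"`
	Status ExamplePolicyStatus `json:"status,omitempty"`
}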
-func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Post(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. 
-func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. -func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index 39823ca82b..d8d0796ead 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. @@ -55,143 +52,20 @@ type ValidatingAdmissionPolicyBindingInterface interface { // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface type validatingAdmissionPolicyBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration] } // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicyBindings { return &validatingAdmissionPolicyBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ValidatingAdmissionPolicyBinding { return &v1alpha1.ValidatingAdmissionPolicyBinding{} }, + func() *v1alpha1.ValidatingAdmissionPolicyBindingList { + return &v1alpha1.ValidatingAdmissionPolicyBindingList{} + }), } } - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. -func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ValidatingAdmissionPolicyBindingList{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Post(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Put(). - Resource("validatingadmissionpolicybindings"). - Name(validatingAdmissionPolicyBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicybindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicybindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go index 9d85aff37f..7671549323 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go @@ -43,20 +43,22 @@ var mutatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("Mut // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(mutatingwebhookconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.MutatingWebhookConfiguration), err } // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { + emptyResult := &v1beta1.MutatingWebhookConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1beta1.MutatingWebhookConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.Li // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. 
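As with the other fakes in this diff, the v1beta1 MutatingWebhookConfiguration fake now records options-aware actions and returns empty non-nil results on error. Existing assertions on the recorded actions keep working; a brief test sketch, with object and field-manager names invented for illustration:

package example

import (
	"context"
	"testing"

	admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestWebhookConfigurationActionsRecorded(t *testing.T) {
	client := fake.NewSimpleClientset()

	cfg := &admissionv1beta1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-webhook"},
	}
	if _, err := client.AdmissionregistrationV1beta1().
		MutatingWebhookConfigurations().
		Create(context.TODO(), cfg, metav1.CreateOptions{FieldManager: "demo-test"}); err != nil {
		t.Fatal(err)
	}

	// The fake still records one action per call; with the *WithOptions constructors the
	// CreateOptions above ride along on that recorded action as well.
	actions := client.Actions()
	if len(actions) != 1 || actions[0].GetVerb() != "create" ||
		actions[0].GetResource().Resource != "mutatingwebhookconfigurations" {
		t.Fatalf("unexpected actions: %#v", actions)
	}
}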
func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(mutatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.MutatingWebhookConfiguration), err } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.MutatingWebhookConfiguration), err } @@ -107,7 +111,7 @@ func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name str // DeleteCollection deletes a collection of objects. func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(mutatingwebhookconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.MutatingWebhookConfigurationList{}) return err @@ -115,10 +119,11 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context // Patch applies the patch and returns the patched mutatingWebhookConfiguration. func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.MutatingWebhookConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.MutatingWebhookConfiguration), err } @@ -136,10 +141,11 @@ func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingW if name == nil { return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.MutatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go index 90cb4ff6ca..e30891c779 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go @@ -43,20 +43,22 @@ var validatingadmissionpoliciesKind = v1beta1.SchemeGroupVersion.WithKind("Valid // Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpoliciesResource, name), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyList, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), &v1beta1.ValidatingAdmissionPolicyList{}) + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.List // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. 
func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpoliciesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts)) } // Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } // Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) { +func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } @@ -118,7 +123,7 @@ func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. 
func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpoliciesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ValidatingAdmissionPolicyList{}) return err @@ -126,10 +131,11 @@ func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched validatingAdmissionPolicy. func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, name, pt, data, subresources...), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } @@ -147,10 +153,11 @@ func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingA if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") } + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } @@ -169,10 +176,11 @@ func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, valid if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") } + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go index f771f81f30..207db37529 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go @@ -43,20 +43,22 @@ var validatingadmissionpolicybindingsKind = v1beta1.SchemeGroupVersion.WithKind( // Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpolicybindingsResource, name), &v1beta1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err } // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyBindingList, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyBindingList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), &v1beta1.ValidatingAdmissionPolicyBindingList{}) + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpolicybindingsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts)) } // Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. 
func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1beta1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err } // Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1beta1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err } @@ -107,7 +111,7 @@ func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name // DeleteCollection deletes a collection of objects. func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpolicybindingsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ValidatingAdmissionPolicyBindingList{}) return err @@ -115,10 +119,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Con // Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, name, pt, data, subresources...), &v1beta1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err } @@ -136,10 +141,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, valid if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") } + emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go index 41e3a7c1ee..f78a31ee0e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go @@ -43,20 +43,22 @@ var validatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("V // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(validatingwebhookconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingWebhookConfiguration), err } // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { + emptyResult := &v1beta1.ValidatingWebhookConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1beta1.ValidatingWebhookConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1. // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingWebhookConfiguration), err } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingWebhookConfiguration), err } @@ -107,7 +111,7 @@ func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name s // DeleteCollection deletes a collection of objects. func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingwebhookconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ValidatingWebhookConfigurationList{}) return err @@ -115,10 +119,11 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Conte // Patch applies the patch and returns the patched validatingWebhookConfiguration. func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingWebhookConfiguration), err } @@ -136,10 +141,11 @@ func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validat if name == nil { return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta1.ValidatingWebhookConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index ca6bb8bd50..7a5bc8b9b3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. @@ -55,143 +52,18 @@ type MutatingWebhookConfigurationInterface interface { // mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface type mutatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration] } // newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations { return &mutatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration]( + "mutatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.MutatingWebhookConfiguration { return &v1beta1.MutatingWebhookConfiguration{} }, + func() *v1beta1.MutatingWebhookConfigurationList { return &v1beta1.MutatingWebhookConfigurationList{} }), } } - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. 
-func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.MutatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Post(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Put(). - Resource("mutatingwebhookconfigurations"). - Name(mutatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. 
-func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("mutatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("mutatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go index bea51b587f..0023d8837c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. @@ -43,6 +40,7 @@ type ValidatingAdmissionPoliciesGetter interface { type ValidatingAdmissionPolicyInterface interface { Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type ValidatingAdmissionPolicyInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface type validatingAdmissionPolicies struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration] } // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies func newValidatingAdmissionPolicies(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicies { return &validatingAdmissionPolicies{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ValidatingAdmissionPolicy { return &v1beta1.ValidatingAdmissionPolicy{} }, + func() *v1beta1.ValidatingAdmissionPolicyList { return &v1beta1.ValidatingAdmissionPolicyList{} }), } } - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *validatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ValidatingAdmissionPolicyList{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. 
-func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Post(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. 
-func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. -func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index bba37bb047..8168d8cbcd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. @@ -55,143 +52,20 @@ type ValidatingAdmissionPolicyBindingInterface interface { // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface type validatingAdmissionPolicyBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration] } // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicyBindings { return &validatingAdmissionPolicyBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ValidatingAdmissionPolicyBinding { return &v1beta1.ValidatingAdmissionPolicyBinding{} }, + func() *v1beta1.ValidatingAdmissionPolicyBindingList { + return &v1beta1.ValidatingAdmissionPolicyBindingList{} + }), } } - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. -func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ValidatingAdmissionPolicyBindingList{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Post(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Put(). - Resource("validatingadmissionpolicybindings"). - Name(validatingAdmissionPolicyBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicybindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicybindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 5ba5974d7a..5abd96823d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. 
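
For readers skimming the vendored churn: client-go v0.31 replaces each typed client's hand-written REST plumbing (Get, List, Watch, Create, Update, Delete, DeleteCollection, Patch, Apply) with the generic gentype.ClientWithListAndApply constructor shown in these hunks, while leaving the exported *Interface types unchanged, and the regenerated fakes switch to the New*ActionWithOptions test actions and return a non-nil empty object alongside any error instead of nil. Caller code therefore compiles and behaves the same across the bump. The sketch below is a reader aid, not part of the patch; the helper name and label selector are invented.

// Illustrative sketch only (not part of the vendored patch): a typical call
// site that is unaffected by the gentype rewrite, since the public
// ValidatingWebhookConfigurationInterface is unchanged.
package example

import (
	"context"

	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listWebhookConfigs compiles against both the old hand-written client and
// the new gentype-backed one; only the vendored implementation changed.
func listWebhookConfigs(ctx context.Context, cs kubernetes.Interface) ([]v1beta1.ValidatingWebhookConfiguration, error) {
	list, err := cs.AdmissionregistrationV1beta1().
		ValidatingWebhookConfigurations().
		List(ctx, metav1.ListOptions{LabelSelector: "example.invalid/managed=true"}) // selector is an invented placeholder
	if err != nil {
		return nil, err
	}
	return list.Items, nil
}

The one observable difference is in tests that compared a fake client's typed result against nil to detect failure: the regenerated fakes now hand back an empty, non-nil object together with the error, so such tests should branch on the returned error instead.
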
@@ -55,143 +52,20 @@ type ValidatingWebhookConfigurationInterface interface { // validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface type validatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration] } // newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations { return &validatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration]( + "validatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ValidatingWebhookConfiguration { return &v1beta1.ValidatingWebhookConfiguration{} }, + func() *v1beta1.ValidatingWebhookConfigurationList { + return &v1beta1.ValidatingWebhookConfigurationList{} + }), } } - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ValidatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Post(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Put(). - Resource("validatingwebhookconfigurations"). - Name(validatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("validatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go index 738c68038b..e9f0b78d46 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go @@ -43,20 +43,22 @@ var storageversionsKind = v1alpha1.SchemeGroupVersion.WithKind("StorageVersion") // Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. func (c *FakeStorageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageversionsResource, name), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootGetActionWithOptions(storageversionsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } // List takes label and field selectors, and returns the list of StorageVersions that match those selectors. func (c *FakeStorageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { + emptyResult := &v1alpha1.StorageVersionList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(storageversionsResource, storageversionsKind, opts), &v1alpha1.StorageVersionList{}) + Invokes(testing.NewRootListActionWithOptions(storageversionsResource, storageversionsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeStorageVersions) List(ctx context.Context, opts v1.ListOptions) (re // Watch returns a watch.Interface that watches the requested storageVersions. func (c *FakeStorageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(storageversionsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(storageversionsResource, opts)) } // Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. func (c *FakeStorageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootCreateActionWithOptions(storageversionsResource, storageVersion, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } // Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. func (c *FakeStorageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootUpdateActionWithOptions(storageversionsResource, storageVersion, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStorageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) { +func (c *FakeStorageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(storageversionsResource, "status", storageVersion), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(storageversionsResource, "status", storageVersion, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } @@ -118,7 +123,7 @@ func (c *FakeStorageVersions) Delete(ctx context.Context, name string, opts v1.D // DeleteCollection deletes a collection of objects. func (c *FakeStorageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageversionsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(storageversionsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionList{}) return err @@ -126,10 +131,11 @@ func (c *FakeStorageVersions) DeleteCollection(ctx context.Context, opts v1.Dele // Patch applies the patch and returns the patched storageVersion. func (c *FakeStorageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, name, pt, data, subresources...), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } @@ -147,10 +153,11 @@ func (c *FakeStorageVersions) Apply(ctx context.Context, storageVersion *apiserv if name == nil { return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") } + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, *name, types.ApplyPatchType, data), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } @@ -169,10 +176,11 @@ func (c *FakeStorageVersions) ApplyStatus(ctx context.Context, storageVersion *a if name == nil { return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") } + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go index 18789c7f82..436593f7fa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" apiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageVersionsGetter has a method to return a StorageVersionInterface. @@ -43,6 +40,7 @@ type StorageVersionsGetter interface { type StorageVersionInterface interface { Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (*v1alpha1.StorageVersion, error) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type StorageVersionInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
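A hedged sketch of how the regenerated fakes above are exercised (the test name and the use of fake.NewSimpleClientset are assumptions, not part of this diff): tests construct and call the fake clientset exactly as before; the *WithOptions action constructors and the emptyResult returns only change what the recorded actions carry and what is returned when no object is found.

package example

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestStorageVersionFakeList uses the fake clientset the same way as before
// this regeneration; the recorded list action now also carries the
// ListOptions that were passed in.
func TestStorageVersionFakeList(t *testing.T) {
	cs := fake.NewSimpleClientset()
	if _, err := cs.InternalV1alpha1().StorageVersions().List(context.TODO(), metav1.ListOptions{}); err != nil {
		t.Fatal(err)
	}
	actions := cs.Fake.Actions()
	if len(actions) != 1 || actions[0].GetVerb() != "list" {
		t.Fatalf("unexpected actions recorded: %v", actions)
	}
}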
ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) StorageVersionExpansion } // storageVersions implements StorageVersionInterface type storageVersions struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration] } // newStorageVersions returns a StorageVersions func newStorageVersions(c *InternalV1alpha1Client) *storageVersions { return &storageVersions{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration]( + "storageversions", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.StorageVersion { return &v1alpha1.StorageVersion{} }, + func() *v1alpha1.StorageVersionList { return &v1alpha1.StorageVersionList{} }), } } - -// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. -func (c *storageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Get(). - Resource("storageversions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageVersions that match those selectors. -func (c *storageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.StorageVersionList{} - err = c.client.Get(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageVersions. -func (c *storageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. -func (c *storageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Post(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. -func (c *storageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Put(). - Resource("storageversions"). - Name(storageVersion.Name). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *storageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Put(). - Resource("storageversions"). - Name(storageVersion.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. -func (c *storageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageversions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageversions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageVersion. -func (c *storageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(pt). - Resource("storageversions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersion. -func (c *storageVersions) Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *storageVersions) ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversions"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index f4b198265d..252f47ba29 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. @@ -55,154 +52,18 @@ type ControllerRevisionInterface interface { // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ControllerRevision { return &v1.ControllerRevision{} }, + func() *v1.ControllerRevisionList { return &v1.ControllerRevisionList{} }), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. 
-func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index 53e5392879..28917a7960 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -43,6 +40,7 @@ type DaemonSetsGetter interface { type DaemonSetInterface interface { Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (*v1.DaemonSet, error) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type DaemonSetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.DaemonSet { return &v1.DaemonSet{} }, + func() *v1.DaemonSetList { return &v1.DaemonSetList{} }), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *daemonSets) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index ccc2049ff7..871d51cfe2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -22,7 +22,6 @@ import ( "context" json "encoding/json" "fmt" - "time" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -31,8 +30,8 @@ import ( watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -45,6 +44,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (*v1.Deployment, error) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -53,6 +53,7 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,27 @@ type DeploymentInterface interface { // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Deployment { return &v1.Deployment{} }, + func() *v1.DeploymentList { return &v1.DeploymentList{} }), } } -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). 
- Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *deployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -278,8 +97,8 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). 
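A hedged usage sketch for the scale subresource methods touched just above (the namespace "default" and deployment name "web" are placeholders): GetScale, UpdateScale, and ApplyScale keep their hand-written bodies and now reach the REST client through the embedded generic client's GetClient() and GetNamespace() accessors, so callers are unaffected.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleDeployment reads the scale subresource and bumps the replica count;
// the namespace and name are illustrative only.
func scaleDeployment(ctx context.Context, cs kubernetes.Interface) error {
	d := cs.AppsV1().Deployments("default")
	scale, err := d.GetScale(ctx, "web", metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = 3
	_, err = d.UpdateScale(ctx, "web", scale, metav1.UpdateOptions{})
	return err
}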
@@ -303,8 +122,8 @@ func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, sca } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go index f691ba9acd..c609ef534a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go @@ -44,22 +44,24 @@ var controllerrevisionsKind = v1.SchemeGroupVersion.WithKind("ControllerRevision // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { + emptyResult := &v1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1.ControllerRevision{}) + Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ControllerRevision), err } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *FakeControllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { + emptyResult := &v1.ControllerRevisionList{} obj, err := c.Fake. - Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1.ControllerRevisionList{}) + Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeControllerRevisions) List(ctx context.Context, opts metav1.ListOpti // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *FakeControllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts)) } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) { + emptyResult := &v1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1.ControllerRevision{}) + Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ControllerRevision), err } // Update takes the representation of a controllerRevision and updates it. 
Returns the server's representation of the controllerRevision, and an error, if there is any. func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) { + emptyResult := &v1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1.ControllerRevision{}) + Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ControllerRevision), err } @@ -114,7 +118,7 @@ func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ControllerRevisionList{}) return err @@ -122,11 +126,12 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts met // Patch applies the patch and returns the patched controllerRevision. func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) { + emptyResult := &v1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ControllerRevision), err } @@ -144,11 +149,12 @@ func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision if name == nil { return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") } + emptyResult := &v1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ControllerRevision), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go index 3e0df72352..bac3fc1225 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go @@ -44,22 +44,24 @@ var daemonsetsKind = v1.SchemeGroupVersion.WithKind("DaemonSet") // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. func (c *FakeDaemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1.DaemonSet{}) + Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *FakeDaemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { + emptyResult := &v1.DaemonSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1.DaemonSetList{}) + Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDaemonSets) List(ctx context.Context, opts metav1.ListOptions) (res // Watch returns a watch.Interface that watches the requested daemonSets. func (c *FakeDaemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts)) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) { + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1.DaemonSet{}) + Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1.DaemonSet{}) + Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1.DaemonSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } @@ -126,7 +131,7 @@ func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts metav1.De // DeleteCollection deletes a collection of objects. func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.DaemonSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts metav1.Delet // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) { + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } @@ -156,11 +162,12 @@ func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetA if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } @@ -179,11 +186,12 @@ func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1.Daem if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go index da1896fe60..8ed8432883 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go @@ -46,22 +46,24 @@ var deploymentsKind = v1.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. 
func (c *FakeDeployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) { + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1.Deployment{}) + Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *FakeDeployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) { + emptyResult := &v1.DeploymentList{} obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1.DeploymentList{}) + Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -80,40 +82,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts metav1.ListOptions) (re // Watch returns a watch.Interface that watches the requested deployments. func (c *FakeDeployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) { + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1.Deployment{}) + Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1.Deployment{}) + Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { + emptyResult := &v1.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1.Deployment{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } @@ -128,7 +133,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts metav1.D // DeleteCollection deletes a collection of objects. func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.DeploymentList{}) return err @@ -136,11 +141,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts metav1.Dele // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) { + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } @@ -158,11 +164,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1.Deployme if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } @@ -181,33 +188,36 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1.De if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } // GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &autoscalingv1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(deploymentsResource, c.ns, "scale", deploymentName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } @@ -222,11 +232,12 @@ func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, if err != nil { return nil, err } + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go index dedf19b42f..942a4e64a3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go @@ -46,22 +46,24 @@ var replicasetsKind = v1.SchemeGroupVersion.WithKind("ReplicaSet") // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. func (c *FakeReplicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1.ReplicaSet{}) + Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *FakeReplicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { + emptyResult := &v1.ReplicaSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1.ReplicaSetList{}) + Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -80,40 +82,43 @@ func (c *FakeReplicaSets) List(ctx context.Context, opts metav1.ListOptions) (re // Watch returns a watch.Interface that watches the requested replicaSets. 
func (c *FakeReplicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts)) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) { + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1.ReplicaSet{}) + Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1.ReplicaSet{}) + Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1.ReplicaSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } @@ -128,7 +133,7 @@ func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts metav1.D // DeleteCollection deletes a collection of objects. func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ReplicaSetList{}) return err @@ -136,11 +141,12 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts metav1.Dele // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) { + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } @@ -158,11 +164,12 @@ func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *appsv1.ReplicaS if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } @@ -181,33 +188,36 @@ func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1.Re if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } // GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &autoscalingv1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(replicasetsResource, c.ns, "scale", replicaSetName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } @@ -222,11 +232,12 @@ func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, if err != nil { return nil, err } + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go index f1d7d96e8d..ae4e811fb7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go @@ -46,22 +46,24 @@ var statefulsetsKind = v1.SchemeGroupVersion.WithKind("StatefulSet") // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. func (c *FakeStatefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1.StatefulSet{}) + Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *FakeStatefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { + emptyResult := &v1.StatefulSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1.StatefulSetList{}) + Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -80,40 +82,43 @@ func (c *FakeStatefulSets) List(ctx context.Context, opts metav1.ListOptions) (r // Watch returns a watch.Interface that watches the requested statefulSets. func (c *FakeStatefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts)) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) { + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1.StatefulSet{}) + Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1.StatefulSet{}) + Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1.StatefulSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } @@ -128,7 +133,7 @@ func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts metav1. // DeleteCollection deletes a collection of objects. func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.StatefulSetList{}) return err @@ -136,11 +141,12 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts metav1.Del // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) { + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } @@ -158,11 +164,12 @@ func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1.Statef if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } @@ -181,33 +188,36 @@ func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1. if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } // GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &autoscalingv1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(statefulsetsResource, c.ns, "scale", statefulSetName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } @@ -222,11 +232,12 @@ func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName strin if err != nil { return nil, err } + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index 917ed521f4..d6dec016bb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -22,7 +22,6 @@ import ( "context" json "encoding/json" "fmt" - "time" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -31,8 +30,8 @@ import ( watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. 
@@ -45,6 +44,7 @@ type ReplicaSetsGetter interface { type ReplicaSetInterface interface { Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (*v1.ReplicaSet, error) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -53,6 +53,7 @@ type ReplicaSetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,27 @@ type ReplicaSetInterface interface { // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ReplicaSet { return &v1.ReplicaSet{} }, + func() *v1.ReplicaSetList { return &v1.ReplicaSetList{} }), } } -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. 
-func (c *replicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. 
-func (c *replicaSets) Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -278,8 +97,8 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -303,8 +122,8 @@ func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, sca } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index d1fbb915d8..b25ed07238 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -22,7 +22,6 @@ import ( "context" json "encoding/json" "fmt" - "time" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -31,8 +30,8 @@ import ( watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -45,6 +44,7 @@ type StatefulSetsGetter interface { type StatefulSetInterface interface { Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (*v1.StatefulSet, error) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -53,6 +53,7 @@ type StatefulSetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,27 @@ type StatefulSetInterface interface { // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.StatefulSet { return &v1.StatefulSet{} }, + func() *v1.StatefulSetList { return &v1.StatefulSetList{} }), } } -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). 
- VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -278,8 +97,8 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). 
SubResource("scale"). @@ -303,8 +122,8 @@ func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, s } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index 0c3f49ba14..185f7cc4e0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. @@ -55,154 +52,18 @@ type ControllerRevisionInterface interface { // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.ControllerRevision { return &v1beta1.ControllerRevision{} }, + func() *v1beta1.ControllerRevisionList { return &v1beta1.ControllerRevisionList{} }), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. 
-func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. 
-func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1beta1.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 281758c435..06e4b7bf93 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -43,6 +40,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Deployment { return &v1beta1.Deployment{} }, + func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }), } } - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go index 1954c94703..7ea2b2e11b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go @@ -44,22 +44,24 @@ var controllerrevisionsKind = v1beta1.SchemeGroupVersion.WithKind("ControllerRev // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { + emptyResult := &v1beta1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta1.ControllerRevision{}) + Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ControllerRevision), err } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { + emptyResult := &v1beta1.ControllerRevisionList{} obj, err := c.Fake. - Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta1.ControllerRevisionList{}) + Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts)) } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { + emptyResult := &v1beta1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) + Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ControllerRevision), err } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. 
func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { + emptyResult := &v1beta1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) + Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ControllerRevision), err } @@ -114,7 +118,7 @@ func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ControllerRevisionList{}) return err @@ -122,11 +126,12 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1. // Patch applies the patch and returns the patched controllerRevision. func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { + emptyResult := &v1beta1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ControllerRevision), err } @@ -144,11 +149,12 @@ func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision if name == nil { return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") } + emptyResult := &v1beta1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ControllerRevision), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go index 9614852f74..05c557ecb3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -44,22 +44,24 @@ var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + emptyResult := &v1beta1.DeploymentList{} obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{}) + Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested deployments. func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -126,7 +131,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -156,11 +162,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1beta1.Dep if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -179,11 +186,12 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1bet if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go index 2124515cfe..c38690554a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -44,22 +44,24 @@ var statefulsetsKind = v1beta1.SchemeGroupVersion.WithKind("StatefulSet") // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{}) + Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { + emptyResult := &v1beta1.StatefulSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta1.StatefulSetList{}) + Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested statefulSets. func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts)) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) + Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) + Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta1.StatefulSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } @@ -126,7 +131,7 @@ func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.StatefulSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } @@ -156,11 +162,12 @@ func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1beta1.S if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } @@ -179,11 +186,12 @@ func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1b if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index 3f1aebcffb..1ff69eb993 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -43,6 +40,7 @@ type StatefulSetsGetter interface { type StatefulSetInterface interface { Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (*v1beta1.StatefulSet, error) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type StatefulSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) StatefulSetExpansion } // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.StatefulSet { return &v1beta1.StatefulSet{} }, + func() *v1beta1.StatefulSetList { return &v1beta1.StatefulSetList{} }), } } - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
-func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1beta1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1beta1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index e1643277a6..6caee6a725 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. @@ -55,154 +52,18 @@ type ControllerRevisionInterface interface { // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.ControllerRevision { return &v1beta2.ControllerRevision{} }, + func() *v1beta2.ControllerRevisionList { return &v1beta2.ControllerRevisionList{} }), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1beta2.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index 1391df87d2..766dc6d433 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -43,6 +40,7 @@ type DaemonSetsGetter interface { type DaemonSetInterface interface { Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (*v1beta2.DaemonSet, error) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DaemonSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.DaemonSet { return &v1beta2.DaemonSet{} }, + func() *v1beta2.DaemonSetList { return &v1beta2.DaemonSetList{} }), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. 
-func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. 
-func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *daemonSets) Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1beta2.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1beta2.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 5bda0d92c1..6592ee8cd9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -43,6 +40,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (*v1beta2.Deployment, error) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1beta2Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.Deployment { return &v1beta2.Deployment{} }, + func() *v1beta2.DeploymentList { return &v1beta2.DeploymentList{} }), } } - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. 
-func (c *deployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta2.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta2.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go index 1bf7fb3314..45b2050706 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go @@ -44,22 +44,24 @@ var controllerrevisionsKind = v1beta2.SchemeGroupVersion.WithKind("ControllerRev // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { + emptyResult := &v1beta2.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta2.ControllerRevision{}) + Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ControllerRevision), err } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { + emptyResult := &v1beta2.ControllerRevisionList{} obj, err := c.Fake. - Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta2.ControllerRevisionList{}) + Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts)) } // Create takes the representation of a controllerRevision and creates it. 
Returns the server's representation of the controllerRevision, and an error, if there is any. func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { + emptyResult := &v1beta2.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) + Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ControllerRevision), err } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { + emptyResult := &v1beta2.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) + Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ControllerRevision), err } @@ -114,7 +118,7 @@ func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.ControllerRevisionList{}) return err @@ -122,11 +126,12 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1. // Patch applies the patch and returns the patched controllerRevision. func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { + emptyResult := &v1beta2.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta2.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ControllerRevision), err } @@ -144,11 +149,12 @@ func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision if name == nil { return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") } + emptyResult := &v1beta2.ControllerRevision{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ControllerRevision), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go index 8f5cfa5a8a..61ceeb1411 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go @@ -44,22 +44,24 @@ var daemonsetsKind = v1beta2.SchemeGroupVersion.WithKind("DaemonSet") // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta2.DaemonSet{}) + Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { + emptyResult := &v1beta2.DaemonSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta2.DaemonSetList{}) + Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested daemonSets. func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts)) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) + Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) + Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta2.DaemonSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } @@ -126,7 +131,7 @@ func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.Delete // DeleteCollection deletes a collection of objects. func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.DaemonSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOpt // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } @@ -156,11 +162,12 @@ func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *appsv1beta2.Daemo if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } @@ -179,11 +186,12 @@ func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1beta2 if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go index c9e8ab48bb..d849856a40 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go @@ -44,22 +44,24 @@ var deploymentsKind = v1beta2.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta2.Deployment{}) + Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { + emptyResult := &v1beta2.DeploymentList{} obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta2.DeploymentList{}) + Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested deployments. func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) + Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. 
func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) + Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta2.Deployment{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } @@ -126,7 +131,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.DeploymentList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta2.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } @@ -156,11 +162,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1beta2.Dep if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } @@ -179,11 +186,12 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1bet if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go index 46e1a78a7a..1f957f0843 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go @@ -44,22 +44,24 @@ var replicasetsKind = v1beta2.SchemeGroupVersion.WithKind("ReplicaSet") // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta2.ReplicaSet{}) + Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { + emptyResult := &v1beta2.ReplicaSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta2.ReplicaSetList{}) + Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested replicaSets. func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts)) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) + Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) + Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta2.ReplicaSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } @@ -126,7 +131,7 @@ func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.ReplicaSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta2.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } @@ -156,11 +162,12 @@ func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *appsv1beta2.Rep if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } @@ -179,11 +186,12 @@ func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1bet if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go index 684f799256..ac8945aa77 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go @@ -44,22 +44,24 @@ var statefulsetsKind = v1beta2.SchemeGroupVersion.WithKind("StatefulSet") // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta2.StatefulSet{}) + Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { + emptyResult := &v1beta2.StatefulSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta2.StatefulSetList{}) + Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested statefulSets. func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts)) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. 
func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) + Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) + Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta2.StatefulSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } @@ -126,7 +131,7 @@ func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.StatefulSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } @@ -156,11 +162,12 @@ func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1beta2.S if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } @@ -179,33 +186,36 @@ func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1b if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } // GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { + emptyResult := &v1beta2.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &v1beta2.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(statefulsetsResource, c.ns, "scale", statefulSetName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { + emptyResult := &v1beta2.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &v1beta2.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "scale", c.ns, scale, opts), &v1beta2.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Scale), err } @@ -220,11 +230,12 @@ func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName strin if err != nil { return nil, err } + emptyResult := &v1beta2.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, "status"), &v1beta2.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index 988d898f79..90380ca980 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. @@ -43,6 +40,7 @@ type ReplicaSetsGetter interface { type ReplicaSetInterface interface { Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (*v1beta2.ReplicaSet, error) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type ReplicaSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) ReplicaSetExpansion } // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.ReplicaSet { return &v1beta2.ReplicaSet{} }, + func() *v1beta2.ReplicaSetList { return &v1beta2.ReplicaSetList{} }), } } - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. 
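Note (illustrative, not part of the vendored diff): the regenerated fakes above return a non-nil empty object alongside the error when the object tracker has nothing to hand back, record the caller-supplied options through the new *WithOptions action constructors, and point the fake ApplyScale at the "scale" subresource instead of "status". The sketch below is a minimal test against the fake clientset using only public client-go API; it shows why callers should branch on the returned error rather than on a nil result.

package example

import (
	"context"
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestDeploymentNotFound demonstrates the regenerated fake behavior: a Get for a
// missing object yields a NotFound error together with an empty, non-nil struct.
func TestDeploymentNotFound(t *testing.T) {
	clientset := fake.NewSimpleClientset() // empty tracker, nothing seeded

	dep, err := clientset.AppsV1beta2().Deployments("default").
		Get(context.Background(), "missing", metav1.GetOptions{})
	if !apierrors.IsNotFound(err) {
		t.Fatalf("expected NotFound, got %v", err)
	}
	// dep is an empty &v1beta2.Deployment{} rather than nil, so nil checks are
	// no longer a reliable way to detect the error case.
	_ = dep
}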
-func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *replicaSets) Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index 0416675d6d..f2d673abb9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -22,15 +22,14 @@ import ( "context" json "encoding/json" "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -43,6 +42,7 @@ type StatefulSetsGetter interface { type StatefulSetInterface interface { Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (*v1beta2.StatefulSet, error) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,6 +51,7 @@ type StatefulSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error) @@ -61,209 +62,27 @@ type StatefulSetInterface interface { // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.StatefulSet { return &v1beta2.StatefulSet{} }, + func() *v1beta2.StatefulSetList { return &v1beta2.StatefulSetList{} }), } } -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
-func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1beta2.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1beta2.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any. func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { result = &v1beta2.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -276,8 +95,8 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { result = &v1beta2.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -301,8 +120,8 @@ func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, s } result = &v1beta2.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go index e683b3eaaa..7e7c3138a5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go @@ -37,10 +37,11 @@ var selfsubjectreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectReview") // Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) { + emptyResult := &v1.SelfSubjectReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1.SelfSubjectReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.SelfSubjectReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go index 500e87d065..a22f335429 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go @@ -37,10 +37,11 @@ var tokenreviewsKind = v1.SchemeGroupVersion.WithKind("TokenReview") // Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { + emptyResult := &v1.TokenReview{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1.TokenReview{}) + Invokes(testing.NewRootCreateActionWithOptions(tokenreviewsResource, tokenReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.TokenReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go index bfb9603d67..720dd9e7e9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectReviewInterface interface { // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - client rest.Interface + *gentype.Client[*v1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1Client) *selfSubjectReviews { return &selfSubjectReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.SelfSubjectReview]( + "selfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SelfSubjectReview { return &v1.SelfSubjectReview{} }), } } - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) { - result = &v1.SelfSubjectReview{} - err = c.client.Post(). - Resource("selfsubjectreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go index ca7cd47d26..52c55fab08 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // TokenReviewsGetter has a method to return a TokenReviewInterface. @@ -41,24 +41,17 @@ type TokenReviewInterface interface { // tokenReviews implements TokenReviewInterface type tokenReviews struct { - client rest.Interface + *gentype.Client[*v1.TokenReview] } // newTokenReviews returns a TokenReviews func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { return &tokenReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.TokenReview]( + "tokenreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.TokenReview { return &v1.TokenReview{} }), } } - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. 
-func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { - result = &v1.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(tokenReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go index a20b3dd764..680460f459 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go @@ -37,10 +37,11 @@ var selfsubjectreviewsKind = v1alpha1.SchemeGroupVersion.WithKind("SelfSubjectRe // Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (result *v1alpha1.SelfSubjectReview, err error) { + emptyResult := &v1alpha1.SelfSubjectReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1alpha1.SelfSubjectReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.SelfSubjectReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go index 7f8b12a46f..f034bcdbe3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go @@ -23,8 +23,8 @@ import ( v1alpha1 "k8s.io/api/authentication/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectReviewInterface interface { // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - client rest.Interface + *gentype.Client[*v1alpha1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1alpha1Client) *selfSubjectReviews { return &selfSubjectReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1alpha1.SelfSubjectReview]( + "selfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.SelfSubjectReview { return &v1alpha1.SelfSubjectReview{} }), } } - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (result *v1alpha1.SelfSubjectReview, err error) { - result = &v1alpha1.SelfSubjectReview{} - err = c.client.Post(). - Resource("selfsubjectreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectReview). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go index 4a9db85cf5..33e130e9cc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go @@ -37,10 +37,11 @@ var selfsubjectreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubjectRev // Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) { + emptyResult := &v1beta1.SelfSubjectReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1beta1.SelfSubjectReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.SelfSubjectReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go index b1988a67a3..b512f5c146 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go @@ -37,10 +37,11 @@ var tokenreviewsKind = v1beta1.SchemeGroupVersion.WithKind("TokenReview") // Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { + emptyResult := &v1beta1.TokenReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1beta1.TokenReview{}) + Invokes(testing.NewRootCreateActionWithOptions(tokenreviewsResource, tokenReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.TokenReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go index 9d54826a31..d083ba8fa9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authentication/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. 
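Note on the regenerated authentication clients above: the hand-written Create methods are removed and the typed structs now embed the generic client from k8s.io/client-go/gentype, constructed with the resource name, REST client, parameter codec, namespace (empty for these cluster-scoped review types), and an empty-object factory. The exported TokenReviewInterface / SelfSubjectReviewInterface surface is unchanged, so existing callers compile as before. A minimal caller-side sketch, assuming a reachable cluster; the kubeconfig path and bearer token are illustrative only:

package main

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path; any other rest.Config source works the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Create is now served by the embedded gentype.Client rather than a
	// hand-written Post() chain, but the call site is identical.
	tr := &authv1.TokenReview{
		Spec: authv1.TokenReviewSpec{Token: "illustrative-bearer-token"},
	}
	res, err := cs.AuthenticationV1().TokenReviews().Create(context.TODO(), tr, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("authenticated:", res.Status.Authenticated)
}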
@@ -41,24 +41,17 @@ type SelfSubjectReviewInterface interface { // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1beta1Client) *selfSubjectReviews { return &selfSubjectReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.SelfSubjectReview]( + "selfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SelfSubjectReview { return &v1beta1.SelfSubjectReview{} }), } } - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) { - result = &v1beta1.SelfSubjectReview{} - err = c.client.Post(). - Resource("selfsubjectreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go index 5da1224337..982534935e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authentication/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // TokenReviewsGetter has a method to return a TokenReviewInterface. @@ -41,24 +41,17 @@ type TokenReviewInterface interface { // tokenReviews implements TokenReviewInterface type tokenReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.TokenReview] } // newTokenReviews returns a TokenReviews func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { return &tokenReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.TokenReview]( + "tokenreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.TokenReview { return &v1beta1.TokenReview{} }), } } - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { - result = &v1beta1.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(tokenReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go index 43ea05328c..dd23481d39 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go @@ -38,11 +38,12 @@ var localsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("LocalSubject // Create takes the representation of a localSubjectAccessReview and creates it. 
Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { + emptyResult := &v1.LocalSubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1.LocalSubjectAccessReview{}) + Invokes(testing.NewCreateActionWithOptions(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LocalSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go index 27642266d6..d04b8502f3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go @@ -37,10 +37,11 @@ var selfsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectAc // Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { + emptyResult := &v1.SelfSubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1.SelfSubjectAccessReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectaccessreviewsResource, selfSubjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.SelfSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go index cd6c682d16..71ed326f8b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go @@ -37,10 +37,11 @@ var selfsubjectrulesreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectRul // Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { + emptyResult := &v1.SelfSubjectRulesReview{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1.SelfSubjectRulesReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectrulesreviewsResource, selfSubjectRulesReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.SelfSubjectRulesReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go index 09dab64807..358ba9aa77 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go @@ -37,10 +37,11 @@ var subjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SubjectAccessRevi // Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { + emptyResult := &v1.SubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1.SubjectAccessReview{}) + Invokes(testing.NewRootCreateActionWithOptions(subjectaccessreviewsResource, subjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.SubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go index 84b2efe166..3d058941a2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. @@ -41,27 +41,17 @@ type LocalSubjectAccessReviewInterface interface { // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface type localSubjectAccessReviews struct { - client rest.Interface - ns string + *gentype.Client[*v1.LocalSubjectAccessReview] } // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews { return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1.LocalSubjectAccessReview]( + "localsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.LocalSubjectAccessReview { return &v1.LocalSubjectAccessReview{} }), } } - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. 
-func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { - result = &v1.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(localSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go index 2006196c11..9e874bee5a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectAccessReviewInterface interface { // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type selfSubjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1.SelfSubjectAccessReview] } // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews { return &selfSubjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.SelfSubjectAccessReview]( + "selfsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SelfSubjectAccessReview { return &v1.SelfSubjectAccessReview{} }), } } - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { - result = &v1.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go index 25d99f7b52..567b63ec4c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. 
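The authorization v1 files above follow the same gentype migration; the one wrinkle is the namespaced LocalSubjectAccessReview, whose namespace moves from a private ns field into the gentype.NewClient constructor (the cluster-scoped review types pass an empty string). Callers see no difference. A hedged caller-side sketch; the helper name, namespace, and service-account values are illustrative assumptions:

package main

import (
	"context"
	"fmt"

	authzv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// canServiceAccountGetPods asks the API server whether an (illustrative) service
// account may "get pods" in a namespace, via the regenerated authorization client.
func canServiceAccountGetPods(ctx context.Context, cs kubernetes.Interface, ns, sa string) (bool, error) {
	sar := &authzv1.LocalSubjectAccessReview{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns},
		Spec: authzv1.SubjectAccessReviewSpec{
			User: fmt.Sprintf("system:serviceaccount:%s:%s", ns, sa),
			ResourceAttributes: &authzv1.ResourceAttributes{
				Namespace: ns,
				Verb:      "get",
				Resource:  "pods",
			},
		},
	}
	// The namespace passed here is the same value gentype.NewClient now receives above.
	res, err := cs.AuthorizationV1().LocalSubjectAccessReviews(ns).Create(ctx, sar, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return res.Status.Allowed, nil
}

Routing the namespace through the constructor keeps all request wiring inside the generic client, which is why each per-resource file collapses to a constructor plus the embedded client.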
@@ -41,24 +41,17 @@ type SelfSubjectRulesReviewInterface interface { // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type selfSubjectRulesReviews struct { - client rest.Interface + *gentype.Client[*v1.SelfSubjectRulesReview] } // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesReviews { return &selfSubjectRulesReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.SelfSubjectRulesReview]( + "selfsubjectrulesreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SelfSubjectRulesReview { return &v1.SelfSubjectRulesReview{} }), } } - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { - result = &v1.SelfSubjectRulesReview{} - err = c.client.Post(). - Resource("selfsubjectrulesreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectRulesReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go index 8ac0566a2e..52e8d74e57 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. @@ -41,24 +41,17 @@ type SubjectAccessReviewInterface interface { // subjectAccessReviews implements SubjectAccessReviewInterface type subjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1.SubjectAccessReview] } // newSubjectAccessReviews returns a SubjectAccessReviews func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { return &subjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.SubjectAccessReview]( + "subjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SubjectAccessReview { return &v1.SubjectAccessReview{} }), } } - -// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { - result = &v1.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(subjectAccessReview). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go index 104e979d19..e2bf627736 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go @@ -38,11 +38,12 @@ var localsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("LocalSu // Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { + emptyResult := &v1beta1.LocalSubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1beta1.LocalSubjectAccessReview{}) + Invokes(testing.NewCreateActionWithOptions(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.LocalSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go index 517e48b760..996e4d4108 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go @@ -37,10 +37,11 @@ var selfsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubj // Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { + emptyResult := &v1beta1.SelfSubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1beta1.SelfSubjectAccessReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectaccessreviewsResource, selfSubjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.SelfSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go index 3aed050fcf..6e4c758909 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go @@ -37,10 +37,11 @@ var selfsubjectrulesreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubje // Create takes the representation of a selfSubjectRulesReview and creates it. 
Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { + emptyResult := &v1beta1.SelfSubjectRulesReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1beta1.SelfSubjectRulesReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectrulesreviewsResource, selfSubjectRulesReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.SelfSubjectRulesReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go index e9bfa521a2..aab6e08dc2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go @@ -37,10 +37,11 @@ var subjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SubjectAcces // Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { + emptyResult := &v1beta1.SubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1beta1.SubjectAccessReview{}) + Invokes(testing.NewRootCreateActionWithOptions(subjectaccessreviewsResource, subjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.SubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go index 78584ba945..302c094b39 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. 
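On the fake side, the hunks above switch every action constructor to its *WithOptions variant (NewRootCreateActionWithOptions, NewCreateActionWithOptions, and so on), so the options passed by the caller travel with the recorded action, and the error path now returns an empty typed object rather than nil. Tests written against the fake clientset should keep working; a minimal sketch under that assumption, with illustrative names and an invented allow rule:

package example_test

import (
	"context"
	"testing"

	authzv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

// A common pattern: stub the review with a reactor, then assert on what the fake
// client recorded. The recorded create action is now built with
// NewRootCreateActionWithOptions, so the CreateOptions are carried along with it.
func TestSubjectAccessReviewFake(t *testing.T) {
	cs := fake.NewSimpleClientset()
	cs.PrependReactor("create", "subjectaccessreviews",
		func(action k8stesting.Action) (bool, runtime.Object, error) {
			sar := action.(k8stesting.CreateAction).GetObject().(*authzv1.SubjectAccessReview)
			sar.Status.Allowed = sar.Spec.User == "alice" // illustrative rule
			return true, sar, nil
		})

	sar := &authzv1.SubjectAccessReview{Spec: authzv1.SubjectAccessReviewSpec{User: "alice"}}
	res, err := cs.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if !res.Status.Allowed {
		t.Fatal("expected alice to be allowed")
	}
	if got := len(cs.Actions()); got != 1 {
		t.Fatalf("expected 1 recorded action, got %d", got)
	}
}

Tests that previously asserted a literal nil result alongside an error may need to compare against the zero-value object instead.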
@@ -41,27 +41,17 @@ type LocalSubjectAccessReviewInterface interface { // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface type localSubjectAccessReviews struct { - client rest.Interface - ns string + *gentype.Client[*v1beta1.LocalSubjectAccessReview] } // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews { return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1beta1.LocalSubjectAccessReview]( + "localsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.LocalSubjectAccessReview { return &v1beta1.LocalSubjectAccessReview{} }), } } - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { - result = &v1beta1.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(localSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go index 0286c93fe6..4b413dc4f0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectAccessReviewInterface interface { // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type selfSubjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.SelfSubjectAccessReview] } // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews { return &selfSubjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.SelfSubjectAccessReview]( + "selfsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SelfSubjectAccessReview { return &v1beta1.SelfSubjectAccessReview{} }), } } - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { - result = &v1beta1.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectAccessReview). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go index d772973ec6..b64cec3015 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectRulesReviewInterface interface { // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type selfSubjectRulesReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.SelfSubjectRulesReview] } // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRulesReviews { return &selfSubjectRulesReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.SelfSubjectRulesReview]( + "selfsubjectrulesreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SelfSubjectRulesReview { return &v1beta1.SelfSubjectRulesReview{} }), } } - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { - result = &v1beta1.SelfSubjectRulesReview{} - err = c.client.Post(). - Resource("selfsubjectrulesreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectRulesReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go index aebe8398c0..3fca833a1b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. @@ -41,24 +41,17 @@ type SubjectAccessReviewInterface interface { // subjectAccessReviews implements SubjectAccessReviewInterface type subjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.SubjectAccessReview] } // newSubjectAccessReviews returns a SubjectAccessReviews func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews { return &subjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.SubjectAccessReview]( + "subjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SubjectAccessReview { return &v1beta1.SubjectAccessReview{} }), } } - -// Create takes the representation of a subjectAccessReview and creates it. 
Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { - result = &v1beta1.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(subjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index a2c95b7539..23e2c391dd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v1.SchemeGroupVersion.WithKind("HorizontalPod // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + emptyResult := &v1.HorizontalPodAutoscalerList{} obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v1.HorizontalPodAutoscalerList{}) + Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts metav1.Lis // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) { + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, // DeleteCollection deletes a collection of objects. func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.HorizontalPodAutoscalerList{}) return err @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 19afde66db..4d29ac5227 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v1.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.HorizontalPodAutoscaler { return &v1.HorizontalPodAutoscaler{} }, + func() *v1.HorizontalPodAutoscalerList { return &v1.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
-func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go index cfcc208232..2ca3d27c94 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v2.SchemeGroupVersion.WithKind("HorizontalPod // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) { + emptyResult := &v2.HorizontalPodAutoscalerList{} obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2.HorizontalPodAutoscalerList{}) + Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOpt // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, // DeleteCollection deletes a collection of objects. func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v2.HorizontalPodAutoscalerList{}) return err @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go index 3a077d71da..dbce8d1020 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v2 import ( "context" - json "encoding/json" - "fmt" - "time" v2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v2.HorizontalPodAutoscaler { return &v2.HorizontalPodAutoscaler{} }, + func() *v2.HorizontalPodAutoscalerList { return &v2.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. 
Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). 
- Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go index 0b2658e642..7f99b5e8fc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v2beta1.SchemeGroupVersion.WithKind("Horizont // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscalerList{} obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta1.HorizontalPodAutoscalerList{}) + Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOpt // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, // DeleteCollection deletes a collection of objects. 
func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v2beta1.HorizontalPodAutoscalerList{}) return err @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 5080912a12..6bc1b77766 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v2beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta1.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v2beta1.HorizontalPodAutoscaler { return &v2beta1.HorizontalPodAutoscaler{} }, + func() *v2beta1.HorizontalPodAutoscalerList { return &v2beta1.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go index 0a7c93c3d3..e037e8ac48 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v2beta2.SchemeGroupVersion.WithKind("Horizont // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. 
func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscalerList{} obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta2.HorizontalPodAutoscalerList{}) + Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOpt // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, // DeleteCollection deletes a collection of objects. func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v2beta2.HorizontalPodAutoscalerList{}) return err @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index 0ddb9108b3..6f464661a9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v2beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v2beta2.HorizontalPodAutoscaler { return &v2beta2.HorizontalPodAutoscaler{} }, + func() *v2beta2.HorizontalPodAutoscalerList { return &v2beta2.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta2.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go index 9250263215..7907a5bf56 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CronJobsGetter has a method to return a CronJobInterface. @@ -43,6 +40,7 @@ type CronJobsGetter interface { type CronJobInterface interface { Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (*v1.CronJob, error) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type CronJobInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) CronJobExpansion } // cronJobs implements CronJobInterface type cronJobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration] } // newCronJobs returns a CronJobs func newCronJobs(c *BatchV1Client, namespace string) *cronJobs { return &cronJobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration]( + "cronjobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.CronJob { return &v1.CronJob{} }, + func() *v1.CronJobList { return &v1.CronJobList{} }), } } - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. -func (c *cronJobs) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - result = &v1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
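The pattern in the hunks above repeats for every typed client in this diff: the hand-written Get/List/Watch/Create/Update/Delete/Patch/Apply bodies are removed and the client struct instead embeds a generic client from k8s.io/client-go/gentype, parameterized by the object, list, and apply-configuration types. A minimal sketch of that parameterization, mirroring the constructor visible in the cronjob.go hunk above (the package and function names below are illustrative, not part of the vendored code):

package sketch

import (
	batchv1 "k8s.io/api/batch/v1"
	applybatchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
	gentype "k8s.io/client-go/gentype"
	"k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

// newCronJobClientSketch mirrors the generated constructor: gentype only needs the
// resource name, a rest.Interface, the parameter codec, the namespace, and factories
// for empty single/list objects to back every method of CronJobInterface.
func newCronJobClientSketch(rc rest.Interface, namespace string) *gentype.ClientWithListAndApply[*batchv1.CronJob, *batchv1.CronJobList, *applybatchv1.CronJobApplyConfiguration] {
	return gentype.NewClientWithListAndApply[*batchv1.CronJob, *batchv1.CronJobList, *applybatchv1.CronJobApplyConfiguration](
		"cronjobs",
		rc,
		scheme.ParameterCodec,
		namespace,
		func() *batchv1.CronJob { return &batchv1.CronJob{} },
		func() *batchv1.CronJobList { return &batchv1.CronJobList{} })
}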
-func (c *cronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - - result = &v1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go index 0cbcce6d81..171bb82329 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go @@ -44,22 +44,24 @@ var cronjobsKind = v1.SchemeGroupVersion.WithKind("CronJob") // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. func (c *FakeCronJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CronJob, err error) { + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1.CronJob{}) + Invokes(testing.NewGetActionWithOptions(cronjobsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *FakeCronJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CronJobList, err error) { + emptyResult := &v1.CronJobList{} obj, err := c.Fake. - Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1.CronJobList{}) + Invokes(testing.NewListActionWithOptions(cronjobsResource, cronjobsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeCronJobs) List(ctx context.Context, opts metav1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested cronJobs. func (c *FakeCronJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(cronjobsResource, c.ns, opts)) } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (result *v1.CronJob, err error) { + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1.CronJob{}) + Invokes(testing.NewCreateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. 
func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1.CronJob{}) + Invokes(testing.NewUpdateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) { +func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1.CronJob{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(cronjobsResource, "status", c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } @@ -126,7 +131,7 @@ func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts metav1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(cronjobsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.CronJobList{}) return err @@ -134,11 +139,12 @@ func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteO // Patch applies the patch and returns the patched cronJob. func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) { + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } @@ -156,11 +162,12 @@ func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyC if name == nil { return nil, fmt.Errorf("cronJob.Name must be provided to Apply") } + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data), &v1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } @@ -179,11 +186,12 @@ func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1.CronJob if name == nil { return nil, fmt.Errorf("cronJob.Name must be provided to Apply") } + emptyResult := &v1.CronJob{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go index cf1a913bdf..23e66953cb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -44,22 +44,24 @@ var jobsKind = v1.SchemeGroupVersion.WithKind("Job") // Get takes name of the job, and returns the corresponding job object, and an error if there is any. func (c *FakeJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) { + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewGetAction(jobsResource, c.ns, name), &v1.Job{}) + Invokes(testing.NewGetActionWithOptions(jobsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } // List takes label and field selectors, and returns the list of Jobs that match those selectors. func (c *FakeJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) { + emptyResult := &v1.JobList{} obj, err := c.Fake. - Invokes(testing.NewListAction(jobsResource, jobsKind, c.ns, opts), &v1.JobList{}) + Invokes(testing.NewListActionWithOptions(jobsResource, jobsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v // Watch returns a watch.Interface that watches the requested jobs. func (c *FakeJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(jobsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(jobsResource, c.ns, opts)) } // Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. func (c *FakeJobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) { + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &v1.Job{}) + Invokes(testing.NewCreateActionWithOptions(jobsResource, c.ns, job, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } // Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. func (c *FakeJobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &v1.Job{}) + Invokes(testing.NewUpdateActionWithOptions(jobsResource, c.ns, job, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeJobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) { +func (c *FakeJobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &v1.Job{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(jobsResource, "status", c.ns, job, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } @@ -126,7 +131,7 @@ func (c *FakeJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *FakeJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(jobsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(jobsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.JobList{}) return err @@ -134,11 +139,12 @@ func (c *FakeJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptio // Patch applies the patch and returns the patched job. func (c *FakeJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) { + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, pt, data, subresources...), &v1.Job{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } @@ -156,11 +162,12 @@ func (c *FakeJobs) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration if name == nil { return nil, fmt.Errorf("job.Name must be provided to Apply") } + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Job{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } @@ -179,11 +186,12 @@ func (c *FakeJobs) ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfigu if name == nil { return nil, fmt.Errorf("job.Name must be provided to Apply") } + emptyResult := &v1.Job{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Job{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index c076c80af2..83dbe6fa4f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // JobsGetter has a method to return a JobInterface. @@ -43,6 +40,7 @@ type JobsGetter interface { type JobInterface interface { Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (*v1.Job, error) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type JobInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) JobExpansion } // jobs implements JobInterface type jobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration] } // newJobs returns a Jobs func newJobs(c *BatchV1Client, namespace string) *jobs { return &jobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration]( + "jobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Job { return &v1.Job{} }, + func() *v1.JobList { return &v1.JobList{} }), } } - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. 
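The fake clients change in lockstep: recorded actions now carry the caller's options (the New...ActionWithOptions constructors) and error paths return a non-nil empty object rather than nil. Test code built on the fake clientset keeps working unchanged; a minimal sketch, assuming the standard k8s.io/client-go/kubernetes/fake package (test name and object values are illustrative):

package sketch

import (
	"context"
	"testing"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestCronJobFakeGet seeds the fake clientset and reads the object back; the
// rewritten fakes record the GetOptions on the action, but the calling test is
// identical to what worked against the previous client-go release.
func TestCronJobFakeGet(t *testing.T) {
	seed := &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: "nightly", Namespace: "jobs"}}
	cs := fake.NewSimpleClientset(seed)

	got, err := cs.BatchV1().CronJobs("jobs").Get(context.Background(), "nightly", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "nightly" {
		t.Fatalf("got CronJob %q, want %q", got.Name, "nightly")
	}
}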
-func (c *jobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.JobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Post(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *jobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *jobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *jobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched job. -func (c *jobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("jobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied job. -func (c *jobs) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - result = &v1.Job{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("jobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *jobs) ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - - result = &v1.Job{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("jobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index d687339ae9..a6f7399d84 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" batchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CronJobsGetter has a method to return a CronJobInterface. @@ -43,6 +40,7 @@ type CronJobsGetter interface { type CronJobInterface interface { Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (*v1beta1.CronJob, error) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type CronJobInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) CronJobExpansion } // cronJobs implements CronJobInterface type cronJobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration] } // newCronJobs returns a CronJobs func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs { return &cronJobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration]( + "cronjobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.CronJob { return &v1beta1.CronJob{} }, + func() *v1beta1.CronJobList { return &v1beta1.CronJobList{} }), } } - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. -func (c *cronJobs) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - result = &v1beta1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *cronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - - result = &v1beta1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go index 9d078f55a9..71cd4f1653 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go @@ -44,22 +44,24 @@ var cronjobsKind = v1beta1.SchemeGroupVersion.WithKind("CronJob") // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. func (c *FakeCronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1beta1.CronJob{}) + Invokes(testing.NewGetActionWithOptions(cronjobsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { + emptyResult := &v1beta1.CronJobList{} obj, err := c.Fake. - Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1beta1.CronJobList{}) + Invokes(testing.NewListActionWithOptions(cronjobsResource, cronjobsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v // Watch returns a watch.Interface that watches the requested cronJobs. func (c *FakeCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(cronjobsResource, c.ns, opts)) } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) + Invokes(testing.NewCreateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } // Update takes the representation of a cronJob and updates it. 
Returns the server's representation of the cronJob, and an error, if there is any. func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) + Invokes(testing.NewUpdateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) { +func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1beta1.CronJob{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(cronjobsResource, "status", c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } @@ -126,7 +131,7 @@ func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(cronjobsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.CronJobList{}) return err @@ -134,11 +139,12 @@ func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptio // Patch applies the patch and returns the patched cronJob. func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1beta1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } @@ -156,11 +162,12 @@ func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobA if name == nil { return nil, fmt.Errorf("cronJob.Name must be provided to Apply") } + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } @@ -179,11 +186,12 @@ func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1beta1.Cr if name == nil { return nil, fmt.Errorf("cronJob.Name must be provided to Apply") } + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go index 0d6b68b296..9fa3300e6c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/certificates/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" certificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. @@ -43,6 +40,7 @@ type CertificateSigningRequestsGetter interface { type CertificateSigningRequestInterface interface { Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (*v1.CertificateSigningRequest, error) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,6 +49,7 @@ type CertificateSigningRequestInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) @@ -59,195 +58,26 @@ type CertificateSigningRequestInterface interface { // certificateSigningRequests implements CertificateSigningRequestInterface type certificateSigningRequests struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration] } // newCertificateSigningRequests returns a CertificateSigningRequests func newCertificateSigningRequests(c *CertificatesV1Client) *certificateSigningRequests { return &certificateSigningRequests{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration]( + "certificatesigningrequests", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.CertificateSigningRequest { return &v1.CertificateSigningRequest{} }, + func() *v1.CertificateSigningRequestList { return &v1.CertificateSigningRequestList{} }), } } -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateSigningRequestList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *certificateSigningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Post(). - Resource("certificatesigningrequests"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("certificatesigningrequests"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(pt). - Resource("certificatesigningrequests"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. 
-func (c *certificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *certificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // UpdateApproval takes the top resource name and the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { result = &v1.CertificateSigningRequest{} - err = c.client.Put(). + err = c.GetClient().Put(). Resource("certificatesigningrequests"). Name(certificateSigningRequestName). SubResource("approval"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go index adb7db0bf6..f3fc99f839 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go @@ -43,20 +43,22 @@ var certificatesigningrequestsKind = v1.SchemeGroupVersion.WithKind("Certificate // Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. 
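Hand-written sub-resource helpers such as UpdateApproval above now reach the underlying rest.Interface through the embedded client's GetClient() accessor instead of a private client field. A sketch of the equivalent wire call, with rc standing in for that rest.Interface (the helper and package names are illustrative):

package sketch

import (
	"context"

	certificatesv1 "k8s.io/api/certificates/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

// approveCSRSketch issues the same PUT to the "approval" sub-resource that the
// generated UpdateApproval performs via c.GetClient().Put() in the hunk above.
func approveCSRSketch(ctx context.Context, rc rest.Interface, csr *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (*certificatesv1.CertificateSigningRequest, error) {
	result := &certificatesv1.CertificateSigningRequest{}
	err := rc.Put().
		Resource("certificatesigningrequests").
		Name(csr.Name).
		SubResource("approval").
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(csr).
		Do(ctx).
		Into(result)
	return result, err
}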
func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootGetActionWithOptions(certificatesigningrequestsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateSigningRequestList, err error) { + emptyResult := &v1.CertificateSigningRequestList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1.CertificateSigningRequestList{}) + Invokes(testing.NewRootListActionWithOptions(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts metav1.L // Watch returns a watch.Interface that watches the requested certificateSigningRequests. func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(certificatesigningrequestsResource, opts)) } // Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootCreateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } // Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } // UpdateStatus was generated because the type contains a Status member. 
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) { +func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "status", certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } @@ -118,7 +123,7 @@ func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string // DeleteCollection deletes a collection of objects. func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(certificatesigningrequestsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.CertificateSigningRequestList{}) return err @@ -126,10 +131,11 @@ func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, o // Patch applies the patch and returns the patched certificateSigningRequest. func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } @@ -147,10 +153,11 @@ func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateS if name == nil { return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") } + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } @@ -169,20 +176,22 @@ func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certif if name == nil { return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") } + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, "status"), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } // UpdateApproval takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *FakeCertificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "approval", certificateSigningRequest), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "approval", certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go index 970fb15e6e..74fe9fa14c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/certificates/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterTrustBundlesGetter has a method to return a ClusterTrustBundleInterface. @@ -55,143 +52,18 @@ type ClusterTrustBundleInterface interface { // clusterTrustBundles implements ClusterTrustBundleInterface type clusterTrustBundles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration] } // newClusterTrustBundles returns a ClusterTrustBundles func newClusterTrustBundles(c *CertificatesV1alpha1Client) *clusterTrustBundles { return &clusterTrustBundles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration]( + "clustertrustbundles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ClusterTrustBundle { return &v1alpha1.ClusterTrustBundle{} }, + func() *v1alpha1.ClusterTrustBundleList { return &v1alpha1.ClusterTrustBundleList{} }), } } - -// Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any. 
-func (c *clusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Get(). - Resource("clustertrustbundles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors. -func (c *clusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterTrustBundleList{} - err = c.client.Get(). - Resource("clustertrustbundles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterTrustBundles. -func (c *clusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clustertrustbundles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterTrustBundle and creates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. -func (c *clusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Post(). - Resource("clustertrustbundles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterTrustBundle). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. -func (c *clusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Put(). - Resource("clustertrustbundles"). - Name(clusterTrustBundle.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterTrustBundle). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterTrustBundle and deletes it. Returns an error if one occurs. -func (c *clusterTrustBundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clustertrustbundles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clustertrustbundles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterTrustBundle. 
-func (c *clusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Patch(pt). - Resource("clustertrustbundles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterTrustBundle. -func (c *clusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - if clusterTrustBundle == nil { - return nil, fmt.Errorf("clusterTrustBundle provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterTrustBundle) - if err != nil { - return nil, err - } - name := clusterTrustBundle.Name - if name == nil { - return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply") - } - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clustertrustbundles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go index 2f849cbd7d..1c4e97bd40 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go @@ -43,20 +43,22 @@ var clustertrustbundlesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterTrust // Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any. func (c *FakeClusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + emptyResult := &v1alpha1.ClusterTrustBundle{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clustertrustbundlesResource, name), &v1alpha1.ClusterTrustBundle{}) + Invokes(testing.NewRootGetActionWithOptions(clustertrustbundlesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterTrustBundle), err } // List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors. func (c *FakeClusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) { + emptyResult := &v1alpha1.ClusterTrustBundleList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clustertrustbundlesResource, clustertrustbundlesKind, opts), &v1alpha1.ClusterTrustBundleList{}) + Invokes(testing.NewRootListActionWithOptions(clustertrustbundlesResource, clustertrustbundlesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) // Watch returns a watch.Interface that watches the requested clusterTrustBundles. 
func (c *FakeClusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clustertrustbundlesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clustertrustbundlesResource, opts)) } // Create takes the representation of a clusterTrustBundle and creates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. func (c *FakeClusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + emptyResult := &v1alpha1.ClusterTrustBundle{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clustertrustbundlesResource, clusterTrustBundle), &v1alpha1.ClusterTrustBundle{}) + Invokes(testing.NewRootCreateActionWithOptions(clustertrustbundlesResource, clusterTrustBundle, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterTrustBundle), err } // Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. func (c *FakeClusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + emptyResult := &v1alpha1.ClusterTrustBundle{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clustertrustbundlesResource, clusterTrustBundle), &v1alpha1.ClusterTrustBundle{}) + Invokes(testing.NewRootUpdateActionWithOptions(clustertrustbundlesResource, clusterTrustBundle, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterTrustBundle), err } @@ -107,7 +111,7 @@ func (c *FakeClusterTrustBundles) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeClusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clustertrustbundlesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clustertrustbundlesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ClusterTrustBundleList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterTrustBundles) DeleteCollection(ctx context.Context, opts v1. // Patch applies the patch and returns the patched clusterTrustBundle. func (c *FakeClusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) { + emptyResult := &v1alpha1.ClusterTrustBundle{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clustertrustbundlesResource, name, pt, data, subresources...), &v1alpha1.ClusterTrustBundle{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clustertrustbundlesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterTrustBundle), err } @@ -136,10 +141,11 @@ func (c *FakeClusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle if name == nil { return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply") } + emptyResult := &v1alpha1.ClusterTrustBundle{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(clustertrustbundlesResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterTrustBundle{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clustertrustbundlesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterTrustBundle), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index ec0b9d266f..de9915c5d6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" certificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. @@ -43,6 +40,7 @@ type CertificateSigningRequestsGetter interface { type CertificateSigningRequestInterface interface { Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*v1beta1.CertificateSigningRequest, error) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type CertificateSigningRequestInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) CertificateSigningRequestExpansion } // certificateSigningRequests implements CertificateSigningRequestInterface type certificateSigningRequests struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration] } // newCertificateSigningRequests returns a CertificateSigningRequests func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests { return &certificateSigningRequests{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration]( + "certificatesigningrequests", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.CertificateSigningRequest { return &v1beta1.CertificateSigningRequest{} }, + func() *v1beta1.CertificateSigningRequestList { return &v1beta1.CertificateSigningRequestList{} }), } } - -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *certificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Post(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("certificatesigningrequests"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(pt). - Resource("certificatesigningrequests"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. 
-func (c *certificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *certificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go index 4737891411..4e631b0a40 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go @@ -30,7 +30,7 @@ type CertificateSigningRequestExpansion interface { func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequest *certificates.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificates.CertificateSigningRequest, err error) { result = &certificates.CertificateSigningRequest{} - err = c.client.Put(). + err = c.GetClient().Put(). Resource("certificatesigningrequests"). Name(certificateSigningRequest.Name). VersionedParams(&opts, scheme.ParameterCodec). 
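Note for code that consumes these regenerated fakes: every generated accessor now returns a non-nil empty object alongside the error when no reaction produces a result, where the previous generation returned nil. A minimal test-style sketch of that behaviour, assuming only the stock fake clientset and its default object-tracker reactions (the package and test name below are illustrative, not part of this diff):

package example

import (
	"context"
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestFakeGetReturnsEmptyObjectOnError shows the post-update behaviour of the
// generated fakes: a Get for a missing object still yields a NotFound error,
// but the returned pointer is an empty struct rather than nil, so callers
// should branch on the error, not on result == nil.
func TestFakeGetReturnsEmptyObjectOnError(t *testing.T) {
	clientset := fake.NewSimpleClientset() // no objects seeded

	csr, err := clientset.CertificatesV1().
		CertificateSigningRequests().
		Get(context.Background(), "does-not-exist", metav1.GetOptions{})

	if !apierrors.IsNotFound(err) {
		t.Fatalf("expected NotFound, got %v", err)
	}
	if csr == nil {
		t.Fatalf("regenerated fakes return an empty object, not nil")
	}
	if csr.Name != "" {
		t.Fatalf("expected an empty CertificateSigningRequest, got %q", csr.Name)
	}
}

The same client-gen update rewrites the typed clients as thin wrappers over gentype.ClientWithListAndApply, which is why the UpdateApproval expansion above now reaches the REST client through c.GetClient() instead of a local c.client field; the public CertificateSigningRequestInterface is unchanged, so existing callers need no code changes.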
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go index 76bb38e7bf..ff5a9bd4c7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -43,20 +43,22 @@ var certificatesigningrequestsKind = v1beta1.SchemeGroupVersion.WithKind("Certif // Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootGetActionWithOptions(certificatesigningrequestsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { + emptyResult := &v1beta1.CertificateSigningRequestList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1beta1.CertificateSigningRequestList{}) + Invokes(testing.NewRootListActionWithOptions(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListO // Watch returns a watch.Interface that watches the requested certificateSigningRequests. func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(certificatesigningrequestsResource, opts)) } // Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootCreateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } // Update takes the representation of a certificateSigningRequest and updates it. 
Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) { +func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "status", certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } @@ -118,7 +123,7 @@ func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string // DeleteCollection deletes a collection of objects. func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(certificatesigningrequestsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.CertificateSigningRequestList{}) return err @@ -126,10 +131,11 @@ func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, o // Patch applies the patch and returns the patched certificateSigningRequest. func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } @@ -147,10 +153,11 @@ func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateS if name == nil { return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") } + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } @@ -169,10 +176,11 @@ func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certif if name == nil { return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") } + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go index 6dc7c4c17f..03f833f370 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go @@ -44,22 +44,24 @@ var leasesKind = v1.SchemeGroupVersion.WithKind("Lease") // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. func (c *FakeLeases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) { + emptyResult := &v1.Lease{} obj, err := c.Fake. - Invokes(testing.NewGetAction(leasesResource, c.ns, name), &v1.Lease{}) + Invokes(testing.NewGetActionWithOptions(leasesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Lease), err } // List takes label and field selectors, and returns the list of Leases that match those selectors. func (c *FakeLeases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) { + emptyResult := &v1.LeaseList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &v1.LeaseList{}) + Invokes(testing.NewListActionWithOptions(leasesResource, leasesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeLeases) List(ctx context.Context, opts metav1.ListOptions) (result // Watch returns a watch.Interface that watches the requested leases. func (c *FakeLeases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(leasesResource, c.ns, opts)) } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. func (c *FakeLeases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) { + emptyResult := &v1.Lease{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1.Lease{}) + Invokes(testing.NewCreateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Lease), err } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. func (c *FakeLeases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) { + emptyResult := &v1.Lease{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1.Lease{}) + Invokes(testing.NewUpdateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Lease), err } @@ -114,7 +118,7 @@ func (c *FakeLeases) Delete(ctx context.Context, name string, opts metav1.Delete // DeleteCollection deletes a collection of objects. func (c *FakeLeases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(leasesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.LeaseList{}) return err @@ -122,11 +126,12 @@ func (c *FakeLeases) DeleteCollection(ctx context.Context, opts metav1.DeleteOpt // Patch applies the patch and returns the patched lease. func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) { + emptyResult := &v1.Lease{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1.Lease{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Lease), err } @@ -144,11 +149,12 @@ func (c *FakeLeases) Apply(ctx context.Context, lease *coordinationv1.LeaseApply if name == nil { return nil, fmt.Errorf("lease.Name must be provided to Apply") } + emptyResult := &v1.Lease{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Lease{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Lease), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go index 9e6b169a81..97834d6ac0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" coordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LeasesGetter has a method to return a LeaseInterface. @@ -55,154 +52,18 @@ type LeaseInterface interface { // leases implements LeaseInterface type leases struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration] } // newLeases returns a Leases func newLeases(c *CoordinationV1Client, namespace string) *leases { return &leases{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration]( + "leases", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Lease { return &v1.Lease{} }, + func() *v1.LeaseList { return &v1.LeaseList{} }), } } - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. 
-func (c *leases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *leases) Apply(ctx context.Context, lease *coordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - result = &v1.Lease{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("leases"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go new file mode 100644 index 0000000000..dd75e5d014 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/coordination/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CoordinationV1alpha1Interface interface { + RESTClient() rest.Interface + LeaseCandidatesGetter +} + +// CoordinationV1alpha1Client is used to interact with features provided by the coordination.k8s.io group. +type CoordinationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *CoordinationV1alpha1Client) LeaseCandidates(namespace string) LeaseCandidateInterface { + return newLeaseCandidates(c, namespace) +} + +// NewForConfig creates a new CoordinationV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CoordinationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CoordinationV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CoordinationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoordinationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoordinationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoordinationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *CoordinationV1alpha1Client { + return &CoordinationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *CoordinationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go similarity index 97% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go index baaf2d9853..df51baa4d4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. -package v1alpha2 +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go new file mode 100644 index 0000000000..2e7d4be268 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeCoordinationV1alpha1 struct { + *testing.Fake +} + +func (c *FakeCoordinationV1alpha1) LeaseCandidates(namespace string) v1alpha1.LeaseCandidateInterface { + return &FakeLeaseCandidates{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCoordinationV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go new file mode 100644 index 0000000000..c3de2303ca --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go @@ -0,0 +1,160 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "k8s.io/api/coordination/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeLeaseCandidates implements LeaseCandidateInterface +type FakeLeaseCandidates struct { + Fake *FakeCoordinationV1alpha1 + ns string +} + +var leasecandidatesResource = v1alpha1.SchemeGroupVersion.WithResource("leasecandidates") + +var leasecandidatesKind = v1alpha1.SchemeGroupVersion.WithKind("LeaseCandidate") + +// Get takes name of the leaseCandidate, and returns the corresponding leaseCandidate object, and an error if there is any. +func (c *FakeLeaseCandidates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.LeaseCandidate, err error) { + emptyResult := &v1alpha1.LeaseCandidate{} + obj, err := c.Fake. + Invokes(testing.NewGetActionWithOptions(leasecandidatesResource, c.ns, name, options), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.LeaseCandidate), err +} + +// List takes label and field selectors, and returns the list of LeaseCandidates that match those selectors. +func (c *FakeLeaseCandidates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.LeaseCandidateList, err error) { + emptyResult := &v1alpha1.LeaseCandidateList{} + obj, err := c.Fake. + Invokes(testing.NewListActionWithOptions(leasecandidatesResource, leasecandidatesKind, c.ns, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.LeaseCandidateList{ListMeta: obj.(*v1alpha1.LeaseCandidateList).ListMeta} + for _, item := range obj.(*v1alpha1.LeaseCandidateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested leaseCandidates. +func (c *FakeLeaseCandidates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchActionWithOptions(leasecandidatesResource, c.ns, opts)) + +} + +// Create takes the representation of a leaseCandidate and creates it. Returns the server's representation of the leaseCandidate, and an error, if there is any. +func (c *FakeLeaseCandidates) Create(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.CreateOptions) (result *v1alpha1.LeaseCandidate, err error) { + emptyResult := &v1alpha1.LeaseCandidate{} + obj, err := c.Fake. 
+ Invokes(testing.NewCreateActionWithOptions(leasecandidatesResource, c.ns, leaseCandidate, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.LeaseCandidate), err +} + +// Update takes the representation of a leaseCandidate and updates it. Returns the server's representation of the leaseCandidate, and an error, if there is any. +func (c *FakeLeaseCandidates) Update(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.UpdateOptions) (result *v1alpha1.LeaseCandidate, err error) { + emptyResult := &v1alpha1.LeaseCandidate{} + obj, err := c.Fake. + Invokes(testing.NewUpdateActionWithOptions(leasecandidatesResource, c.ns, leaseCandidate, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.LeaseCandidate), err +} + +// Delete takes name of the leaseCandidate and deletes it. Returns an error if one occurs. +func (c *FakeLeaseCandidates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(leasecandidatesResource, c.ns, name, opts), &v1alpha1.LeaseCandidate{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeLeaseCandidates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionActionWithOptions(leasecandidatesResource, c.ns, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.LeaseCandidateList{}) + return err +} + +// Patch applies the patch and returns the patched leaseCandidate. +func (c *FakeLeaseCandidates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.LeaseCandidate, err error) { + emptyResult := &v1alpha1.LeaseCandidate{} + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(leasecandidatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.LeaseCandidate), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied leaseCandidate. +func (c *FakeLeaseCandidates) Apply(ctx context.Context, leaseCandidate *coordinationv1alpha1.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.LeaseCandidate, err error) { + if leaseCandidate == nil { + return nil, fmt.Errorf("leaseCandidate provided to Apply must not be nil") + } + data, err := json.Marshal(leaseCandidate) + if err != nil { + return nil, err + } + name := leaseCandidate.Name + if name == nil { + return nil, fmt.Errorf("leaseCandidate.Name must be provided to Apply") + } + emptyResult := &v1alpha1.LeaseCandidate{} + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(leasecandidatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.LeaseCandidate), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..2dc2f30cfc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type LeaseCandidateExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go new file mode 100644 index 0000000000..868185135b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/coordination/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// LeaseCandidatesGetter has a method to return a LeaseCandidateInterface. +// A group's client should implement this interface. +type LeaseCandidatesGetter interface { + LeaseCandidates(namespace string) LeaseCandidateInterface +} + +// LeaseCandidateInterface has methods to work with LeaseCandidate resources. 
+type LeaseCandidateInterface interface { + Create(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.CreateOptions) (*v1alpha1.LeaseCandidate, error) + Update(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.UpdateOptions) (*v1alpha1.LeaseCandidate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.LeaseCandidate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.LeaseCandidateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.LeaseCandidate, err error) + Apply(ctx context.Context, leaseCandidate *coordinationv1alpha1.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.LeaseCandidate, err error) + LeaseCandidateExpansion +} + +// leaseCandidates implements LeaseCandidateInterface +type leaseCandidates struct { + *gentype.ClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration] +} + +// newLeaseCandidates returns a LeaseCandidates +func newLeaseCandidates(c *CoordinationV1alpha1Client, namespace string) *leaseCandidates { + return &leaseCandidates{ + gentype.NewClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration]( + "leasecandidates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.LeaseCandidate { return &v1alpha1.LeaseCandidate{} }, + func() *v1alpha1.LeaseCandidateList { return &v1alpha1.LeaseCandidateList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go index 9a4a0d7eb9..112784af94 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go @@ -44,22 +44,24 @@ var leasesKind = v1beta1.SchemeGroupVersion.WithKind("Lease") // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. func (c *FakeLeases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { + emptyResult := &v1beta1.Lease{} obj, err := c.Fake. - Invokes(testing.NewGetAction(leasesResource, c.ns, name), &v1beta1.Lease{}) + Invokes(testing.NewGetActionWithOptions(leasesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Lease), err } // List takes label and field selectors, and returns the list of Leases that match those selectors. func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { + emptyResult := &v1beta1.LeaseList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &v1beta1.LeaseList{}) + Invokes(testing.NewListActionWithOptions(leasesResource, leasesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *v1b // Watch returns a watch.Interface that watches the requested leases. func (c *FakeLeases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(leasesResource, c.ns, opts)) } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. func (c *FakeLeases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { + emptyResult := &v1beta1.Lease{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1beta1.Lease{}) + Invokes(testing.NewCreateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Lease), err } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. func (c *FakeLeases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { + emptyResult := &v1beta1.Lease{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1beta1.Lease{}) + Invokes(testing.NewUpdateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Lease), err } @@ -114,7 +118,7 @@ func (c *FakeLeases) Delete(ctx context.Context, name string, opts v1.DeleteOpti // DeleteCollection deletes a collection of objects. func (c *FakeLeases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(leasesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.LeaseList{}) return err @@ -122,11 +126,12 @@ func (c *FakeLeases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions // Patch applies the patch and returns the patched lease. func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { + emptyResult := &v1beta1.Lease{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1beta1.Lease{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Lease), err } @@ -144,11 +149,12 @@ func (c *FakeLeases) Apply(ctx context.Context, lease *coordinationv1beta1.Lease if name == nil { return nil, fmt.Errorf("lease.Name must be provided to Apply") } + emptyResult := &v1beta1.Lease{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Lease{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Lease), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index 1bbd57bdd1..62341e53b6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" coordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LeasesGetter has a method to return a LeaseInterface. @@ -55,154 +52,18 @@ type LeaseInterface interface { // leases implements LeaseInterface type leases struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration] } // newLeases returns a Leases func newLeases(c *CoordinationV1beta1Client, namespace string) *leases { return &leases{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration]( + "leases", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Lease { return &v1beta1.Lease{} }, + func() *v1beta1.LeaseList { return &v1beta1.LeaseList{} }), } } - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. 
-func (c *leases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *leases) Apply(ctx context.Context, lease *coordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - result = &v1beta1.Lease{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("leases"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index 0fef56429d..ab9458a5c9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ComponentStatusesGetter has a method to return a ComponentStatusInterface. @@ -55,143 +52,18 @@ type ComponentStatusInterface interface { // componentStatuses implements ComponentStatusInterface type componentStatuses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration] } // newComponentStatuses returns a ComponentStatuses func newComponentStatuses(c *CoreV1Client) *componentStatuses { return &componentStatuses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration]( + "componentstatuses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ComponentStatus { return &v1.ComponentStatus{} }, + func() *v1.ComponentStatusList { return &v1.ComponentStatusList{} }), } } - -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *componentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Get(). - Resource("componentstatuses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *componentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ComponentStatusList{} - err = c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *componentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Post(). 
- Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(componentStatus). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Put(). - Resource("componentstatuses"). - Name(componentStatus.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(componentStatus). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *componentStatuses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *componentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("componentstatuses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched componentStatus. -func (c *componentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Patch(pt). - Resource("componentstatuses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied componentStatus. -func (c *componentStatuses) Apply(ctx context.Context, componentStatus *corev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ComponentStatus, err error) { - if componentStatus == nil { - return nil, fmt.Errorf("componentStatus provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(componentStatus) - if err != nil { - return nil, err - } - name := componentStatus.Name - if name == nil { - return nil, fmt.Errorf("componentStatus.Name must be provided to Apply") - } - result = &v1.ComponentStatus{} - err = c.client.Patch(types.ApplyPatchType). - Resource("componentstatuses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index b68177720b..72aa2361f0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ConfigMapsGetter has a method to return a ConfigMapInterface. @@ -55,154 +52,18 @@ type ConfigMapInterface interface { // configMaps implements ConfigMapInterface type configMaps struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration] } // newConfigMaps returns a ConfigMaps func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { return &configMaps{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration]( + "configmaps", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ConfigMap { return &v1.ConfigMap{} }, + func() *v1.ConfigMapList { return &v1.ConfigMapList{} }), } } - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ConfigMapList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *configMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Post(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(configMap). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Put(). - Namespace(c.ns). - Resource("configmaps"). - Name(configMap.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(configMap). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *configMaps) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *configMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched configMap. -func (c *configMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied configMap. -func (c *configMaps) Apply(ctx context.Context, configMap *corev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ConfigMap, err error) { - if configMap == nil { - return nil, fmt.Errorf("configMap provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(configMap) - if err != nil { - return nil, err - } - name := configMap.Name - if name == nil { - return nil, fmt.Errorf("configMap.Name must be provided to Apply") - } - result = &v1.ConfigMap{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("configmaps"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index cdf464b069..9b9fc5fc1e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointsGetter has a method to return a EndpointsInterface. 
@@ -55,154 +52,18 @@ type EndpointsInterface interface { // endpoints implements EndpointsInterface type endpoints struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration] } // newEndpoints returns a Endpoints func newEndpoints(c *CoreV1Client, namespace string) *endpoints { return &endpoints{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration]( + "endpoints", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Endpoints { return &v1.Endpoints{} }, + func() *v1.EndpointsList { return &v1.EndpointsList{} }), } } - -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *endpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *endpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EndpointsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpoints. -func (c *endpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpoints). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpoints"). - Name(endpoints.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpoints). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *endpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - Body(&opts). - Do(ctx). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpoints. -func (c *endpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpoints. -func (c *endpoints) Apply(ctx context.Context, endpoints *corev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Endpoints, err error) { - if endpoints == nil { - return nil, fmt.Errorf("endpoints provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpoints) - if err != nil { - return nil, err - } - name := endpoints.Name - if name == nil { - return nil, fmt.Errorf("endpoints.Name must be provided to Apply") - } - result = &v1.Endpoints{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpoints"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 8274d85ffe..5ff0f06906 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -55,154 +52,18 @@ type EventInterface interface { // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *CoreV1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Event { return &v1.Event{} }, + func() *v1.EventList { return &v1.EventList{} }), } } - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. 
-func (c *events) Apply(ctx context.Context, event *corev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go index a3fdf57a98..4243572328 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -48,11 +48,11 @@ type EventExpansion interface { // event; it must either match this event client's namespace, or this event // client must have been created with the "" namespace. func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Post(). + err := e.GetClient().Post(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Body(event). @@ -67,11 +67,11 @@ func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { // created with the "" namespace. Update also requires the ResourceVersion to be set in the event // object. func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Put(). + err := e.GetClient().Put(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). @@ -87,11 +87,11 @@ func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { // match this event client's namespace, or this event client must have been // created with the "" namespace. func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) { - if e.ns != "" && incompleteEvent.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns) + if e.GetNamespace() != "" && incompleteEvent.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Patch(types.StrategicMergePatchType). + err := e.GetClient().Patch(types.StrategicMergePatchType). NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). 
Resource("events"). Name(incompleteEvent.Name). @@ -109,8 +109,8 @@ func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.Ev if err != nil { return nil, err } - if len(e.ns) > 0 && ref.Namespace != e.ns { - return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) + if len(e.GetNamespace()) > 0 && ref.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.GetNamespace()) } stringRefKind := string(ref.Kind) var refKind *string diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go index 39d4c3282e..dbd305280b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -43,20 +43,22 @@ var componentstatusesKind = v1.SchemeGroupVersion.WithKind("ComponentStatus") // Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. func (c *FakeComponentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { + emptyResult := &v1.ComponentStatus{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(componentstatusesResource, name), &v1.ComponentStatus{}) + Invokes(testing.NewRootGetActionWithOptions(componentstatusesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ComponentStatus), err } // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. func (c *FakeComponentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { + emptyResult := &v1.ComponentStatusList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(componentstatusesResource, componentstatusesKind, opts), &v1.ComponentStatusList{}) + Invokes(testing.NewRootListActionWithOptions(componentstatusesResource, componentstatusesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeComponentStatuses) List(ctx context.Context, opts metav1.ListOption // Watch returns a watch.Interface that watches the requested componentStatuses. func (c *FakeComponentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(componentstatusesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(componentstatusesResource, opts)) } // Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. func (c *FakeComponentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) { + emptyResult := &v1.ComponentStatus{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) + Invokes(testing.NewRootCreateActionWithOptions(componentstatusesResource, componentStatus, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ComponentStatus), err } // Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. func (c *FakeComponentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) { + emptyResult := &v1.ComponentStatus{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) + Invokes(testing.NewRootUpdateActionWithOptions(componentstatusesResource, componentStatus, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ComponentStatus), err } @@ -107,7 +111,7 @@ func (c *FakeComponentStatuses) Delete(ctx context.Context, name string, opts me // DeleteCollection deletes a collection of objects. func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(componentstatusesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(componentstatusesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ComponentStatusList{}) return err @@ -115,10 +119,11 @@ func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, opts metav // Patch applies the patch and returns the patched componentStatus. func (c *FakeComponentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) { + emptyResult := &v1.ComponentStatus{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, pt, data, subresources...), &v1.ComponentStatus{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(componentstatusesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ComponentStatus), err } @@ -136,10 +141,11 @@ func (c *FakeComponentStatuses) Apply(ctx context.Context, componentStatus *core if name == nil { return nil, fmt.Errorf("componentStatus.Name must be provided to Apply") } + emptyResult := &v1.ComponentStatus{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, *name, types.ApplyPatchType, data), &v1.ComponentStatus{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(componentstatusesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ComponentStatus), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go index 6e8a38bd8f..ae760add7f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -44,22 +44,24 @@ var configmapsKind = v1.SchemeGroupVersion.WithKind("ConfigMap") // Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. func (c *FakeConfigMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { + emptyResult := &v1.ConfigMap{} obj, err := c.Fake. - Invokes(testing.NewGetAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) + Invokes(testing.NewGetActionWithOptions(configmapsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ConfigMap), err } // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. func (c *FakeConfigMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { + emptyResult := &v1.ConfigMapList{} obj, err := c.Fake. - Invokes(testing.NewListAction(configmapsResource, configmapsKind, c.ns, opts), &v1.ConfigMapList{}) + Invokes(testing.NewListActionWithOptions(configmapsResource, configmapsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeConfigMaps) List(ctx context.Context, opts metav1.ListOptions) (res // Watch returns a watch.Interface that watches the requested configMaps. func (c *FakeConfigMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(configmapsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(configmapsResource, c.ns, opts)) } // Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. func (c *FakeConfigMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) { + emptyResult := &v1.ConfigMap{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) + Invokes(testing.NewCreateActionWithOptions(configmapsResource, c.ns, configMap, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ConfigMap), err } // Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. func (c *FakeConfigMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) { + emptyResult := &v1.ConfigMap{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) + Invokes(testing.NewUpdateActionWithOptions(configmapsResource, c.ns, configMap, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ConfigMap), err } @@ -114,7 +118,7 @@ func (c *FakeConfigMaps) Delete(ctx context.Context, name string, opts metav1.De // DeleteCollection deletes a collection of objects. func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(configmapsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(configmapsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ConfigMapList{}) return err @@ -122,11 +126,12 @@ func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, opts metav1.Delet // Patch applies the patch and returns the patched configMap. func (c *FakeConfigMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) { + emptyResult := &v1.ConfigMap{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, pt, data, subresources...), &v1.ConfigMap{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(configmapsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ConfigMap), err } @@ -144,11 +149,12 @@ func (c *FakeConfigMaps) Apply(ctx context.Context, configMap *corev1.ConfigMapA if name == nil { return nil, fmt.Errorf("configMap.Name must be provided to Apply") } + emptyResult := &v1.ConfigMap{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ConfigMap{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(configmapsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ConfigMap), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go index 6b2f6c249e..7e2e91cfa6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -44,22 +44,24 @@ var endpointsKind = v1.SchemeGroupVersion.WithKind("Endpoints") // Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. func (c *FakeEndpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { + emptyResult := &v1.Endpoints{} obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &v1.Endpoints{}) + Invokes(testing.NewGetActionWithOptions(endpointsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Endpoints), err } // List takes label and field selectors, and returns the list of Endpoints that match those selectors. func (c *FakeEndpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { + emptyResult := &v1.EndpointsList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(endpointsResource, endpointsKind, c.ns, opts), &v1.EndpointsList{}) + Invokes(testing.NewListActionWithOptions(endpointsResource, endpointsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEndpoints) List(ctx context.Context, opts metav1.ListOptions) (resu // Watch returns a watch.Interface that watches the requested endpoints. func (c *FakeEndpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(endpointsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(endpointsResource, c.ns, opts)) } // Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. func (c *FakeEndpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { + emptyResult := &v1.Endpoints{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + Invokes(testing.NewCreateActionWithOptions(endpointsResource, c.ns, endpoints, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Endpoints), err } // Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. func (c *FakeEndpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { + emptyResult := &v1.Endpoints{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + Invokes(testing.NewUpdateActionWithOptions(endpointsResource, c.ns, endpoints, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Endpoints), err } @@ -114,7 +118,7 @@ func (c *FakeEndpoints) Delete(ctx context.Context, name string, opts metav1.Del // DeleteCollection deletes a collection of objects. func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(endpointsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EndpointsList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts metav1.Delete // Patch applies the patch and returns the patched endpoints. func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { + emptyResult := &v1.Endpoints{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &v1.Endpoints{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Endpoints), err } @@ -144,11 +149,12 @@ func (c *FakeEndpoints) Apply(ctx context.Context, endpoints *corev1.EndpointsAp if name == nil { return nil, fmt.Errorf("endpoints.Name must be provided to Apply") } + emptyResult := &v1.Endpoints{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Endpoints{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Endpoints), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go index 9ad879b394..a438ba4737 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -44,22 +44,24 @@ var eventsKind = v1.SchemeGroupVersion.WithKind("Event") // Get takes name of the event, and returns the corresponding event object, and an error if there is any. func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) + Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { + emptyResult := &v1.EventList{} obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1.EventList{}) + Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result // Watch returns a watch.Interface that watches the requested events. func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts)) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) + Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) + Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } @@ -114,7 +118,7 @@ func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.Delete // DeleteCollection deletes a collection of objects. func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EventList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOpt // Patch applies the patch and returns the patched event. func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } @@ -144,11 +149,12 @@ func (c *FakeEvents) Apply(ctx context.Context, event *corev1.EventApplyConfigur if name == nil { return nil, fmt.Errorf("event.Name must be provided to Apply") } + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go index f18b5741c3..4cc36131ae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -44,22 +44,24 @@ var limitrangesKind = v1.SchemeGroupVersion.WithKind("LimitRange") // Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. func (c *FakeLimitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { + emptyResult := &v1.LimitRange{} obj, err := c.Fake. - Invokes(testing.NewGetAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) + Invokes(testing.NewGetActionWithOptions(limitrangesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LimitRange), err } // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. func (c *FakeLimitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { + emptyResult := &v1.LimitRangeList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(limitrangesResource, limitrangesKind, c.ns, opts), &v1.LimitRangeList{}) + Invokes(testing.NewListActionWithOptions(limitrangesResource, limitrangesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeLimitRanges) List(ctx context.Context, opts metav1.ListOptions) (re // Watch returns a watch.Interface that watches the requested limitRanges. func (c *FakeLimitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(limitrangesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(limitrangesResource, c.ns, opts)) } // Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. func (c *FakeLimitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) { + emptyResult := &v1.LimitRange{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) + Invokes(testing.NewCreateActionWithOptions(limitrangesResource, c.ns, limitRange, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LimitRange), err } // Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. func (c *FakeLimitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) { + emptyResult := &v1.LimitRange{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) + Invokes(testing.NewUpdateActionWithOptions(limitrangesResource, c.ns, limitRange, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LimitRange), err } @@ -114,7 +118,7 @@ func (c *FakeLimitRanges) Delete(ctx context.Context, name string, opts metav1.D // DeleteCollection deletes a collection of objects. func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(limitrangesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(limitrangesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.LimitRangeList{}) return err @@ -122,11 +126,12 @@ func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, opts metav1.Dele // Patch applies the patch and returns the patched limitRange. func (c *FakeLimitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) { + emptyResult := &v1.LimitRange{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, pt, data, subresources...), &v1.LimitRange{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(limitrangesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LimitRange), err } @@ -144,11 +149,12 @@ func (c *FakeLimitRanges) Apply(ctx context.Context, limitRange *corev1.LimitRan if name == nil { return nil, fmt.Errorf("limitRange.Name must be provided to Apply") } + emptyResult := &v1.LimitRange{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, *name, types.ApplyPatchType, data), &v1.LimitRange{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(limitrangesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LimitRange), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go index 52fcff591e..093990571f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -43,20 +43,22 @@ var namespacesKind = v1.SchemeGroupVersion.WithKind("Namespace") // Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. func (c *FakeNamespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { + emptyResult := &v1.Namespace{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(namespacesResource, name), &v1.Namespace{}) + Invokes(testing.NewRootGetActionWithOptions(namespacesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } // List takes label and field selectors, and returns the list of Namespaces that match those selectors. func (c *FakeNamespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { + emptyResult := &v1.NamespaceList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(namespacesResource, namespacesKind, opts), &v1.NamespaceList{}) + Invokes(testing.NewRootListActionWithOptions(namespacesResource, namespacesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeNamespaces) List(ctx context.Context, opts metav1.ListOptions) (res // Watch returns a watch.Interface that watches the requested namespaces. func (c *FakeNamespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(namespacesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(namespacesResource, opts)) } // Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. func (c *FakeNamespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { + emptyResult := &v1.Namespace{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &v1.Namespace{}) + Invokes(testing.NewRootCreateActionWithOptions(namespacesResource, namespace, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } // Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. func (c *FakeNamespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { + emptyResult := &v1.Namespace{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &v1.Namespace{}) + Invokes(testing.NewRootUpdateActionWithOptions(namespacesResource, namespace, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) { +func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { + emptyResult := &v1.Namespace{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &v1.Namespace{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(namespacesResource, "status", namespace, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } @@ -118,10 +123,11 @@ func (c *FakeNamespaces) Delete(ctx context.Context, name string, opts metav1.De // Patch applies the patch and returns the patched namespace. func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { + emptyResult := &v1.Namespace{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &v1.Namespace{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } @@ -139,10 +145,11 @@ func (c *FakeNamespaces) Apply(ctx context.Context, namespace *corev1.NamespaceA if name == nil { return nil, fmt.Errorf("namespace.Name must be provided to Apply") } + emptyResult := &v1.Namespace{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data), &v1.Namespace{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } @@ -161,10 +168,11 @@ func (c *FakeNamespaces) ApplyStatus(ctx context.Context, namespace *corev1.Name if name == nil { return nil, fmt.Errorf("namespace.Name must be provided to Apply") } + emptyResult := &v1.Namespace{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data, "status"), &v1.Namespace{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go index 5df40f8d11..451f992da1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -43,20 +43,22 @@ var nodesKind = v1.SchemeGroupVersion.WithKind("Node") // Get takes name of the node, and returns the corresponding node object, and an error if there is any. func (c *FakeNodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(nodesResource, name), &v1.Node{}) + Invokes(testing.NewRootGetActionWithOptions(nodesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } // List takes label and field selectors, and returns the list of Nodes that match those selectors. func (c *FakeNodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { + emptyResult := &v1.NodeList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &v1.NodeList{}) + Invokes(testing.NewRootListActionWithOptions(nodesResource, nodesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeNodes) List(ctx context.Context, opts metav1.ListOptions) (result * // Watch returns a watch.Interface that watches the requested nodes. func (c *FakeNodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(nodesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(nodesResource, opts)) } // Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. func (c *FakeNodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(nodesResource, node), &v1.Node{}) + Invokes(testing.NewRootCreateActionWithOptions(nodesResource, node, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } // Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. func (c *FakeNodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(nodesResource, node), &v1.Node{}) + Invokes(testing.NewRootUpdateActionWithOptions(nodesResource, node, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } // UpdateStatus was generated because the type contains a Status member. 
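The recurring change in these regenerated fakes — returning emptyResult instead of nil when a reactor short-circuits the call — is observable from ordinary unit tests. Below is a minimal sketch (not part of the vendored diff) using the standard fake clientset; the injected reactor, the node name "worker-1", and the test layout are illustrative assumptions:

package example

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

func TestGetNodeReturnsEmptyObjectOnError(t *testing.T) {
	client := fake.NewSimpleClientset()

	// Inject a NotFound error for every get of a Node, as tests commonly do.
	client.PrependReactor("get", "nodes",
		func(action clienttesting.Action) (bool, runtime.Object, error) {
			name := action.(clienttesting.GetAction).GetName()
			return true, nil, apierrors.NewNotFound(corev1.Resource("nodes"), name)
		})

	node, err := client.CoreV1().Nodes().Get(context.TODO(), "worker-1", metav1.GetOptions{})
	if !apierrors.IsNotFound(err) {
		t.Fatalf("expected NotFound, got %v", err)
	}
	// With the regenerated code in this diff, the error is still returned, but
	// node is a non-nil zero-value *corev1.Node rather than nil, so tests
	// should branch on err rather than on node == nil.
	if node == nil {
		t.Fatal("expected a non-nil empty Node alongside the error")
	}
}
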
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) { +func (c *FakeNodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &v1.Node{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(nodesResource, "status", node, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } @@ -118,7 +123,7 @@ func (c *FakeNodes) Delete(ctx context.Context, name string, opts metav1.DeleteO // DeleteCollection deletes a collection of objects. func (c *FakeNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(nodesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(nodesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.NodeList{}) return err @@ -126,10 +131,11 @@ func (c *FakeNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOpti // Patch applies the patch and returns the patched node. func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &v1.Node{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } @@ -147,10 +153,11 @@ func (c *FakeNodes) Apply(ctx context.Context, node *corev1.NodeApplyConfigurati if name == nil { return nil, fmt.Errorf("node.Name must be provided to Apply") } + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data), &v1.Node{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } @@ -169,10 +176,11 @@ func (c *FakeNodes) ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfi if name == nil { return nil, fmt.Errorf("node.Name must be provided to Apply") } + emptyResult := &v1.Node{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data, "status"), &v1.Node{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go index 5b06d0b192..16a1f2201a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -43,20 +43,22 @@ var persistentvolumesKind = v1.SchemeGroupVersion.WithKind("PersistentVolume") // Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. func (c *FakePersistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(persistentvolumesResource, name), &v1.PersistentVolume{}) + Invokes(testing.NewRootGetActionWithOptions(persistentvolumesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. func (c *FakePersistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { + emptyResult := &v1.PersistentVolumeList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(persistentvolumesResource, persistentvolumesKind, opts), &v1.PersistentVolumeList{}) + Invokes(testing.NewRootListActionWithOptions(persistentvolumesResource, persistentvolumesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakePersistentVolumes) List(ctx context.Context, opts metav1.ListOption // Watch returns a watch.Interface that watches the requested persistentVolumes. func (c *FakePersistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(persistentvolumesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(persistentvolumesResource, opts)) } // Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. func (c *FakePersistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) { + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) + Invokes(testing.NewRootCreateActionWithOptions(persistentvolumesResource, persistentVolume, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } // Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. 
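The *WithOptions constructors used throughout these files mean the caller's Get/List/Create options now travel with the action the fake records. A short illustrative test (not part of the diff) showing the usual way recorded actions are inspected; the field manager and label selector values are made up:

package example

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

func TestPersistentVolumeActionsAreRecorded(t *testing.T) {
	client := fake.NewSimpleClientset()

	pv := &corev1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: "pv-1"}}
	if _, err := client.CoreV1().PersistentVolumes().Create(
		context.TODO(), pv, metav1.CreateOptions{FieldManager: "example-test"}); err != nil {
		t.Fatal(err)
	}
	if _, err := client.CoreV1().PersistentVolumes().List(
		context.TODO(), metav1.ListOptions{LabelSelector: "tier=gold"}); err != nil {
		t.Fatal(err)
	}

	// The fake clientset records one action per call, in order.
	actions := client.Actions()
	if len(actions) != 2 {
		t.Fatalf("expected 2 recorded actions, got %d", len(actions))
	}
	if !actions[0].Matches("create", "persistentvolumes") || !actions[1].Matches("list", "persistentvolumes") {
		t.Fatalf("unexpected actions: %#v", actions)
	}
	created := actions[0].(clienttesting.CreateAction).GetObject().(*corev1.PersistentVolume)
	if created.Name != "pv-1" {
		t.Fatalf("unexpected object in create action: %q", created.Name)
	}
}
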
func (c *FakePersistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) + Invokes(testing.NewRootUpdateActionWithOptions(persistentvolumesResource, persistentVolume, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) { +func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &v1.PersistentVolume{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(persistentvolumesResource, "status", persistentVolume, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } @@ -118,7 +123,7 @@ func (c *FakePersistentVolumes) Delete(ctx context.Context, name string, opts me // DeleteCollection deletes a collection of objects. func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(persistentvolumesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(persistentvolumesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PersistentVolumeList{}) return err @@ -126,10 +131,11 @@ func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, opts metav // Patch applies the patch and returns the patched persistentVolume. func (c *FakePersistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) { + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, pt, data, subresources...), &v1.PersistentVolume{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } @@ -147,10 +153,11 @@ func (c *FakePersistentVolumes) Apply(ctx context.Context, persistentVolume *cor if name == nil { return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") } + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data), &v1.PersistentVolume{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } @@ -169,10 +176,11 @@ func (c *FakePersistentVolumes) ApplyStatus(ctx context.Context, persistentVolum if name == nil { return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") } + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data, "status"), &v1.PersistentVolume{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go index b860e53674..12617c2432 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -44,22 +44,24 @@ var persistentvolumeclaimsKind = v1.SchemeGroupVersion.WithKind("PersistentVolum // Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. func (c *FakePersistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewGetActionWithOptions(persistentvolumeclaimsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { + emptyResult := &v1.PersistentVolumeClaimList{} obj, err := c.Fake. - Invokes(testing.NewListAction(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), &v1.PersistentVolumeClaimList{}) + Invokes(testing.NewListActionWithOptions(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts metav1.ListO // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. func (c *FakePersistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(persistentvolumeclaimsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(persistentvolumeclaimsResource, c.ns, opts)) } // Create takes the representation of a persistentVolumeClaim and creates it. 
Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. func (c *FakePersistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) { + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewCreateActionWithOptions(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } // Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. func (c *FakePersistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewUpdateActionWithOptions(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) { +func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } @@ -126,7 +131,7 @@ func (c *FakePersistentVolumeClaims) Delete(ctx context.Context, name string, op // DeleteCollection deletes a collection of objects. func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(persistentvolumeclaimsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PersistentVolumeClaimList{}) return err @@ -134,11 +139,12 @@ func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, opts // Patch applies the patch and returns the patched persistentVolumeClaim. func (c *FakePersistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, pt, data, subresources...), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } @@ -156,11 +162,12 @@ func (c *FakePersistentVolumeClaims) Apply(ctx context.Context, persistentVolume if name == nil { return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") } + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } @@ -179,11 +186,12 @@ func (c *FakePersistentVolumeClaims) ApplyStatus(ctx context.Context, persistent if name == nil { return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") } + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index 23634c7d07..d2b46e8e3a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -44,22 +44,24 @@ var podsKind = v1.SchemeGroupVersion.WithKind("Pod") // Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. func (c *FakePods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewGetAction(podsResource, c.ns, name), &v1.Pod{}) + Invokes(testing.NewGetActionWithOptions(podsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } // List takes label and field selectors, and returns the list of Pods that match those selectors. func (c *FakePods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { + emptyResult := &v1.PodList{} obj, err := c.Fake. - Invokes(testing.NewListAction(podsResource, podsKind, c.ns, opts), &v1.PodList{}) + Invokes(testing.NewListActionWithOptions(podsResource, podsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakePods) List(ctx context.Context, opts metav1.ListOptions) (result *v // Watch returns a watch.Interface that watches the requested pods. func (c *FakePods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(testing.NewWatchAction(podsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(podsResource, c.ns, opts)) } // Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. func (c *FakePods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &v1.Pod{}) + Invokes(testing.NewCreateActionWithOptions(podsResource, c.ns, pod, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } // Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. func (c *FakePods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &v1.Pod{}) + Invokes(testing.NewUpdateActionWithOptions(podsResource, c.ns, pod, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) { +func (c *FakePods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &v1.Pod{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(podsResource, "status", c.ns, pod, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } @@ -126,7 +131,7 @@ func (c *FakePods) Delete(ctx context.Context, name string, opts metav1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *FakePods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(podsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PodList{}) return err @@ -134,11 +139,12 @@ func (c *FakePods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptio // Patch applies the patch and returns the patched pod. func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &v1.Pod{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } @@ -156,11 +162,12 @@ func (c *FakePods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, if name == nil { return nil, fmt.Errorf("pod.Name must be provided to Apply") } + emptyResult := &v1.Pod{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Pod{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } @@ -179,22 +186,24 @@ func (c *FakePods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfigur if name == nil { return nil, fmt.Errorf("pod.Name must be provided to Apply") } + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Pod{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } // UpdateEphemeralContainers takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(podsResource, "ephemeralcontainers", c.ns, pod), &v1.Pod{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(podsResource, "ephemeralcontainers", c.ns, pod, opts), &v1.Pod{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go index 9fa97ab402..dc9affdd06 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -44,22 +44,24 @@ var podtemplatesKind = v1.SchemeGroupVersion.WithKind("PodTemplate") // Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. func (c *FakePodTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { + emptyResult := &v1.PodTemplate{} obj, err := c.Fake. - Invokes(testing.NewGetAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) + Invokes(testing.NewGetActionWithOptions(podtemplatesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodTemplate), err } // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. func (c *FakePodTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { + emptyResult := &v1.PodTemplateList{} obj, err := c.Fake. - Invokes(testing.NewListAction(podtemplatesResource, podtemplatesKind, c.ns, opts), &v1.PodTemplateList{}) + Invokes(testing.NewListActionWithOptions(podtemplatesResource, podtemplatesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakePodTemplates) List(ctx context.Context, opts metav1.ListOptions) (r // Watch returns a watch.Interface that watches the requested podTemplates. 
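As the Apply methods above show, the fake clients model server-side apply as a patch action with types.ApplyPatchType and the ApplyOptions converted via ToPatchOptions(). The following is a hedged sketch (not part of the diff) of a test that intercepts that patch with a reactor; the pod name, namespace, and reactor body are assumptions:

package example

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	corev1apply "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

func TestApplyIsRecordedAsApplyPatch(t *testing.T) {
	client := fake.NewSimpleClientset()

	// Intercept the patch that the generated Apply method issues.
	client.PrependReactor("patch", "pods",
		func(action clienttesting.Action) (bool, runtime.Object, error) {
			patch := action.(clienttesting.PatchAction)
			if patch.GetPatchType() != types.ApplyPatchType {
				t.Fatalf("expected an apply patch, got %s", patch.GetPatchType())
			}
			return true, &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: patch.GetName()}}, nil
		})

	cfg := corev1apply.Pod("demo", "default").WithLabels(map[string]string{"app": "demo"})
	pod, err := client.CoreV1().Pods("default").Apply(
		context.TODO(), cfg, metav1.ApplyOptions{FieldManager: "example-test", Force: true})
	if err != nil {
		t.Fatal(err)
	}
	if pod.Name != "demo" {
		t.Fatalf("unexpected pod name %q", pod.Name)
	}
}
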
func (c *FakePodTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(podtemplatesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(podtemplatesResource, c.ns, opts)) } // Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. func (c *FakePodTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) { + emptyResult := &v1.PodTemplate{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) + Invokes(testing.NewCreateActionWithOptions(podtemplatesResource, c.ns, podTemplate, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodTemplate), err } // Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. func (c *FakePodTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) { + emptyResult := &v1.PodTemplate{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) + Invokes(testing.NewUpdateActionWithOptions(podtemplatesResource, c.ns, podTemplate, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodTemplate), err } @@ -114,7 +118,7 @@ func (c *FakePodTemplates) Delete(ctx context.Context, name string, opts metav1. // DeleteCollection deletes a collection of objects. func (c *FakePodTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(podtemplatesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PodTemplateList{}) return err @@ -122,11 +126,12 @@ func (c *FakePodTemplates) DeleteCollection(ctx context.Context, opts metav1.Del // Patch applies the patch and returns the patched podTemplate. func (c *FakePodTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) { + emptyResult := &v1.PodTemplate{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, pt, data, subresources...), &v1.PodTemplate{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podtemplatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodTemplate), err } @@ -144,11 +149,12 @@ func (c *FakePodTemplates) Apply(ctx context.Context, podTemplate *corev1.PodTem if name == nil { return nil, fmt.Errorf("podTemplate.Name must be provided to Apply") } + emptyResult := &v1.PodTemplate{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &v1.PodTemplate{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podtemplatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodTemplate), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go index 1e469c9b1a..6b3497f089 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -45,22 +45,24 @@ var replicationcontrollersKind = v1.SchemeGroupVersion.WithKind("ReplicationCont // Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. func (c *FakeReplicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. - Invokes(testing.NewGetAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) + Invokes(testing.NewGetActionWithOptions(replicationcontrollersResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. func (c *FakeReplicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { + emptyResult := &v1.ReplicationControllerList{} obj, err := c.Fake. - Invokes(testing.NewListAction(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), &v1.ReplicationControllerList{}) + Invokes(testing.NewListActionWithOptions(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -79,40 +81,43 @@ func (c *FakeReplicationControllers) List(ctx context.Context, opts metav1.ListO // Watch returns a watch.Interface that watches the requested replicationControllers. func (c *FakeReplicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(replicationcontrollersResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(replicationcontrollersResource, c.ns, opts)) } // Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. func (c *FakeReplicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) { + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) + Invokes(testing.NewCreateActionWithOptions(replicationcontrollersResource, c.ns, replicationController, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } // Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. func (c *FakeReplicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) + Invokes(testing.NewUpdateActionWithOptions(replicationcontrollersResource, c.ns, replicationController, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) { +func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &v1.ReplicationController{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicationcontrollersResource, "status", c.ns, replicationController, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } @@ -127,7 +132,7 @@ func (c *FakeReplicationControllers) Delete(ctx context.Context, name string, op // DeleteCollection deletes a collection of objects. func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(replicationcontrollersResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ReplicationControllerList{}) return err @@ -135,11 +140,12 @@ func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, opts // Patch applies the patch and returns the patched replicationController. func (c *FakeReplicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) { + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, pt, data, subresources...), &v1.ReplicationController{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } @@ -157,11 +163,12 @@ func (c *FakeReplicationControllers) Apply(ctx context.Context, replicationContr if name == nil { return nil, fmt.Errorf("replicationController.Name must be provided to Apply") } + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data), &v1.ReplicationController{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } @@ -180,33 +187,36 @@ func (c *FakeReplicationControllers) ApplyStatus(ctx context.Context, replicatio if name == nil { return nil, fmt.Errorf("replicationController.Name must be provided to Apply") } + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ReplicationController{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } // GetScale takes name of the replicationController, and returns the corresponding scale object, and an error if there is any. func (c *FakeReplicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &autoscalingv1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(replicationcontrollersResource, c.ns, "scale", replicationControllerName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeReplicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicationcontrollersResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go index 87664985ce..5e2e02afc1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -44,22 +44,24 @@ var resourcequotasKind = v1.SchemeGroupVersion.WithKind("ResourceQuota") // Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. func (c *FakeResourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewGetAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) + Invokes(testing.NewGetActionWithOptions(resourcequotasResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. func (c *FakeResourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { + emptyResult := &v1.ResourceQuotaList{} obj, err := c.Fake. - Invokes(testing.NewListAction(resourcequotasResource, resourcequotasKind, c.ns, opts), &v1.ResourceQuotaList{}) + Invokes(testing.NewListActionWithOptions(resourcequotasResource, resourcequotasKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeResourceQuotas) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested resourceQuotas. func (c *FakeResourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(resourcequotasResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(resourcequotasResource, c.ns, opts)) } // Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) { + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) + Invokes(testing.NewCreateActionWithOptions(resourcequotasResource, c.ns, resourceQuota, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } // Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. 
func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) + Invokes(testing.NewUpdateActionWithOptions(resourcequotasResource, c.ns, resourceQuota, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) { +func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &v1.ResourceQuota{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(resourcequotasResource, "status", c.ns, resourceQuota, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } @@ -126,7 +131,7 @@ func (c *FakeResourceQuotas) Delete(ctx context.Context, name string, opts metav // DeleteCollection deletes a collection of objects. func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(resourcequotasResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ResourceQuotaList{}) return err @@ -134,11 +139,12 @@ func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts metav1.D // Patch applies the patch and returns the patched resourceQuota. func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) { + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, pt, data, subresources...), &v1.ResourceQuota{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } @@ -156,11 +162,12 @@ func (c *FakeResourceQuotas) Apply(ctx context.Context, resourceQuota *corev1.Re if name == nil { return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") } + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data), &v1.ResourceQuota{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } @@ -179,11 +186,12 @@ func (c *FakeResourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *cor if name == nil { return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") } + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ResourceQuota{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go index 90035a7037..ec0fc65b5b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -44,22 +44,24 @@ var secretsKind = v1.SchemeGroupVersion.WithKind("Secret") // Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. func (c *FakeSecrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { + emptyResult := &v1.Secret{} obj, err := c.Fake. - Invokes(testing.NewGetAction(secretsResource, c.ns, name), &v1.Secret{}) + Invokes(testing.NewGetActionWithOptions(secretsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Secret), err } // List takes label and field selectors, and returns the list of Secrets that match those selectors. func (c *FakeSecrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { + emptyResult := &v1.SecretList{} obj, err := c.Fake. - Invokes(testing.NewListAction(secretsResource, secretsKind, c.ns, opts), &v1.SecretList{}) + Invokes(testing.NewListActionWithOptions(secretsResource, secretsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeSecrets) List(ctx context.Context, opts metav1.ListOptions) (result // Watch returns a watch.Interface that watches the requested secrets. func (c *FakeSecrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(secretsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(secretsResource, c.ns, opts)) } // Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. func (c *FakeSecrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { + emptyResult := &v1.Secret{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &v1.Secret{}) + Invokes(testing.NewCreateActionWithOptions(secretsResource, c.ns, secret, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Secret), err } // Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. func (c *FakeSecrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { + emptyResult := &v1.Secret{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &v1.Secret{}) + Invokes(testing.NewUpdateActionWithOptions(secretsResource, c.ns, secret, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Secret), err } @@ -114,7 +118,7 @@ func (c *FakeSecrets) Delete(ctx context.Context, name string, opts metav1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(secretsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.SecretList{}) return err @@ -122,11 +126,12 @@ func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOp // Patch applies the patch and returns the patched secret. func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { + emptyResult := &v1.Secret{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &v1.Secret{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(secretsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Secret), err } @@ -144,11 +149,12 @@ func (c *FakeSecrets) Apply(ctx context.Context, secret *corev1.SecretApplyConfi if name == nil { return nil, fmt.Errorf("secret.Name must be provided to Apply") } + emptyResult := &v1.Secret{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Secret{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(secretsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Secret), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go index 514ab19e39..2a3cf45fbc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -44,22 +44,24 @@ var servicesKind = v1.SchemeGroupVersion.WithKind("Service") // Get takes name of the service, and returns the corresponding service object, and an error if there is any. func (c *FakeServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { + emptyResult := &v1.Service{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(servicesResource, c.ns, name), &v1.Service{}) + Invokes(testing.NewGetActionWithOptions(servicesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } // List takes label and field selectors, and returns the list of Services that match those selectors. func (c *FakeServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { + emptyResult := &v1.ServiceList{} obj, err := c.Fake. - Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &v1.ServiceList{}) + Invokes(testing.NewListActionWithOptions(servicesResource, servicesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeServices) List(ctx context.Context, opts metav1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested services. func (c *FakeServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(servicesResource, c.ns, opts)) } // Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. func (c *FakeServices) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &v1.Service{}) + Invokes(testing.NewCreateActionWithOptions(servicesResource, c.ns, service, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } // Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. func (c *FakeServices) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{}) + Invokes(testing.NewUpdateActionWithOptions(servicesResource, c.ns, service, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServices) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) { +func (c *FakeServices) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(servicesResource, "status", c.ns, service, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } @@ -126,11 +131,12 @@ func (c *FakeServices) Delete(ctx context.Context, name string, opts metav1.Dele // Patch applies the patch and returns the patched service. 
func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &v1.Service{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } @@ -148,11 +154,12 @@ func (c *FakeServices) Apply(ctx context.Context, service *corev1.ServiceApplyCo if name == nil { return nil, fmt.Errorf("service.Name must be provided to Apply") } + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Service{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } @@ -171,11 +178,12 @@ func (c *FakeServices) ApplyStatus(ctx context.Context, service *corev1.ServiceA if name == nil { return nil, fmt.Errorf("service.Name must be provided to Apply") } + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Service{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go index 115ff07123..f3ad8d40f9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -45,22 +45,24 @@ var serviceaccountsKind = v1.SchemeGroupVersion.WithKind("ServiceAccount") // Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. func (c *FakeServiceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { + emptyResult := &v1.ServiceAccount{} obj, err := c.Fake. - Invokes(testing.NewGetAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) + Invokes(testing.NewGetActionWithOptions(serviceaccountsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ServiceAccount), err } // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. func (c *FakeServiceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { + emptyResult := &v1.ServiceAccountList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(serviceaccountsResource, serviceaccountsKind, c.ns, opts), &v1.ServiceAccountList{}) + Invokes(testing.NewListActionWithOptions(serviceaccountsResource, serviceaccountsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -79,28 +81,30 @@ func (c *FakeServiceAccounts) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested serviceAccounts. func (c *FakeServiceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(serviceaccountsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(serviceaccountsResource, c.ns, opts)) } // Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. func (c *FakeServiceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) { + emptyResult := &v1.ServiceAccount{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) + Invokes(testing.NewCreateActionWithOptions(serviceaccountsResource, c.ns, serviceAccount, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ServiceAccount), err } // Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. func (c *FakeServiceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) { + emptyResult := &v1.ServiceAccount{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) + Invokes(testing.NewUpdateActionWithOptions(serviceaccountsResource, c.ns, serviceAccount, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ServiceAccount), err } @@ -115,7 +119,7 @@ func (c *FakeServiceAccounts) Delete(ctx context.Context, name string, opts meta // DeleteCollection deletes a collection of objects. func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(serviceaccountsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ServiceAccountList{}) return err @@ -123,11 +127,12 @@ func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, opts metav1. // Patch applies the patch and returns the patched serviceAccount. func (c *FakeServiceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) { + emptyResult := &v1.ServiceAccount{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, pt, data, subresources...), &v1.ServiceAccount{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(serviceaccountsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ServiceAccount), err } @@ -145,22 +150,24 @@ func (c *FakeServiceAccounts) Apply(ctx context.Context, serviceAccount *corev1. if name == nil { return nil, fmt.Errorf("serviceAccount.Name must be provided to Apply") } + emptyResult := &v1.ServiceAccount{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ServiceAccount{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(serviceaccountsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ServiceAccount), err } // CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. func (c *FakeServiceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { + emptyResult := &authenticationv1.TokenRequest{} obj, err := c.Fake. - Invokes(testing.NewCreateSubresourceAction(serviceaccountsResource, serviceAccountName, "token", c.ns, tokenRequest), &authenticationv1.TokenRequest{}) + Invokes(testing.NewCreateSubresourceActionWithOptions(serviceaccountsResource, serviceAccountName, "token", c.ns, tokenRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*authenticationv1.TokenRequest), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index e6883b607c..f8e4048f98 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LimitRangesGetter has a method to return a LimitRangeInterface. @@ -55,154 +52,18 @@ type LimitRangeInterface interface { // limitRanges implements LimitRangeInterface type limitRanges struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration] } // newLimitRanges returns a LimitRanges func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { return &limitRanges{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration]( + "limitranges", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.LimitRange { return &v1.LimitRange{} }, + func() *v1.LimitRangeList { return &v1.LimitRangeList{} }), } } - -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. 
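The fake-client hunks above all follow one pattern: the bare action constructors (NewGetAction, NewCreateAction, NewPatchSubresourceAction, ...) become their ...WithOptions counterparts, so the request options are recorded with the action, and a miss now returns a typed empty object instead of a nil pointer. A minimal sketch of how test code typically exercises these fakes is below; it is illustrative rather than part of the vendored patch, and the test name, namespace, and secret name are invented for the example.

```go
package fakeclient_test

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

// TestFakeSecretGet (illustrative name) drives the regenerated fakes:
// every call is recorded as an action, and with the ...WithOptions
// constructors the metav1.GetOptions travel with that record.
func TestFakeSecretGet(t *testing.T) {
	cs := fake.NewSimpleClientset(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	})

	if _, err := cs.CoreV1().Secrets("default").Get(context.Background(), "demo", metav1.GetOptions{}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Inspect the recorded actions, as tests against the fake usually do.
	for _, a := range cs.Actions() {
		if get, ok := a.(k8stesting.GetAction); ok && get.GetName() == "demo" {
			return // the Get was observed
		}
	}
	t.Fatalf("expected a recorded get action for %q", "demo")
}
```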
-func (c *limitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *limitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.LimitRangeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *limitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Post(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(limitRange). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Put(). - Namespace(c.ns). - Resource("limitranges"). - Name(limitRange.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(limitRange). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *limitRanges) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *limitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched limitRange. -func (c *limitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Patch(pt). - Namespace(c.ns). 
- Resource("limitranges"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied limitRange. -func (c *limitRanges) Apply(ctx context.Context, limitRange *corev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.LimitRange, err error) { - if limitRange == nil { - return nil, fmt.Errorf("limitRange provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(limitRange) - if err != nil { - return nil, err - } - name := limitRange.Name - if name == nil { - return nil, fmt.Errorf("limitRange.Name must be provided to Apply") - } - result = &v1.LimitRange{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("limitranges"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index 06c77b4c45..75d20648f5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NamespacesGetter has a method to return a NamespaceInterface. @@ -43,6 +40,7 @@ type NamespacesGetter interface { type NamespaceInterface interface { Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error) @@ -50,178 +48,25 @@ type NamespaceInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) NamespaceExpansion } // namespaces implements NamespaceInterface type namespaces struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration] } // newNamespaces returns a Namespaces func newNamespaces(c *CoreV1Client) *namespaces { return &namespaces{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration]( + "namespaces", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Namespace { return &v1.Namespace{} }, + func() *v1.NamespaceList { return &v1.NamespaceList{} }), } } - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *namespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Get(). - Resource("namespaces"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NamespaceList{} - err = c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *namespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Post(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *namespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). 
- Resource("namespaces"). - Name(namespace.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *namespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("namespaces"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched namespace. -func (c *namespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Patch(pt). - Resource("namespaces"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied namespace. -func (c *namespaces) Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - result = &v1.Namespace{} - err = c.client.Patch(types.ApplyPatchType). - Resource("namespaces"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *namespaces) ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - - result = &v1.Namespace{} - err = c.client.Patch(types.ApplyPatchType). - Resource("namespaces"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go index be1116db15..4f720fb92e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go @@ -32,6 +32,6 @@ type NamespaceExpansion interface { // Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. 
func (c *namespaces) Finalize(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} - err = c.client.Put().Resource("namespaces").Name(namespace.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("finalize").Body(namespace).Do(ctx).Into(result) + err = c.GetClient().Put().Resource("namespaces").Name(namespace.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("finalize").Body(namespace).Do(ctx).Into(result) return } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index d9725b2f95..df1a7817f9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NodesGetter has a method to return a NodeInterface. @@ -43,6 +40,7 @@ type NodesGetter interface { type NodeInterface interface { Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type NodeInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) NodeExpansion } // nodes implements NodeInterface type nodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration] } // newNodes returns a Nodes func newNodes(c *CoreV1Client) *nodes { return &nodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration]( + "nodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Node { return &v1.Node{} }, + func() *v1.NodeList { return &v1.NodeList{} }), } } - -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *nodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. 
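The expansion files (namespace_expansion.go above, node_expansion.go below) keep their hand-written helpers but now reach the REST client through GetClient(), exposed by the embedded gentype client, instead of the removed private field. A usage sketch of the Finalize expansion method follows; removeFinalizers is an invented name and clearing spec.finalizers is shown only to demonstrate the call, not as a recommendation.

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// removeFinalizers (illustrative) clears spec.finalizers on a namespace via
// the Finalize expansion method. After the gentype rewrite the method goes
// through GetClient() internally, but its signature and behaviour are
// unchanged for callers.
func removeFinalizers(ctx context.Context, cs kubernetes.Interface, name string) error {
	ns, err := cs.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	ns.Spec.Finalizers = nil
	// Finalize writes to the "finalize" subresource, as the hunk above shows.
	_, err = cs.CoreV1().Namespaces().Finalize(ctx, ns, metav1.UpdateOptions{})
	return err
}
```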
-func (c *nodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NodeList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Post(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *nodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("nodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("nodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched node. -func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Patch(pt). - Resource("nodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied node. 
-func (c *nodes) Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - result = &v1.Node{} - err = c.client.Patch(types.ApplyPatchType). - Resource("nodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *nodes) ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - - result = &v1.Node{} - err = c.client.Patch(types.ApplyPatchType). - Resource("nodes"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go index bdf7bfed8d..df86253b0e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go @@ -34,7 +34,7 @@ type NodeExpansion interface { // the node that the server returns, or an error. func (c *nodes) PatchStatus(ctx context.Context, nodeName string, data []byte) (*v1.Node, error) { result := &v1.Node{} - err := c.client.Patch(types.StrategicMergePatchType). + err := c.GetClient().Patch(types.StrategicMergePatchType). Resource("nodes"). Name(nodeName). SubResource("status"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index a8e2295977..8be40f8665 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PersistentVolumesGetter has a method to return a PersistentVolumeInterface. @@ -43,6 +40,7 @@ type PersistentVolumesGetter interface { type PersistentVolumeInterface interface { Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (*v1.PersistentVolume, error) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type PersistentVolumeInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) PersistentVolumeExpansion } // persistentVolumes implements PersistentVolumeInterface type persistentVolumes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration] } // newPersistentVolumes returns a PersistentVolumes func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { return &persistentVolumes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration]( + "persistentvolumes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.PersistentVolume { return &v1.PersistentVolume{} }, + func() *v1.PersistentVolumeList { return &v1.PersistentVolumeList{} }), } } - -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *persistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Get(). - Resource("persistentvolumes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *persistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PersistentVolumeList{} - err = c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *persistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. 
-func (c *persistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Post(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *persistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *persistentVolumes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("persistentvolumes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched persistentVolume. -func (c *persistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Patch(pt). - Resource("persistentvolumes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolume. 
-func (c *persistentVolumes) Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - result = &v1.PersistentVolume{} - err = c.client.Patch(types.ApplyPatchType). - Resource("persistentvolumes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *persistentVolumes) ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - - result = &v1.PersistentVolume{} - err = c.client.Patch(types.ApplyPatchType). - Resource("persistentvolumes"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index 2e7f4fb44f..7721b00923 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. @@ -43,6 +40,7 @@ type PersistentVolumeClaimsGetter interface { type PersistentVolumeClaimInterface interface { Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (*v1.PersistentVolumeClaim, error) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type PersistentVolumeClaimInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) PersistentVolumeClaimExpansion } // persistentVolumeClaims implements PersistentVolumeClaimInterface type persistentVolumeClaims struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration] } // newPersistentVolumeClaims returns a PersistentVolumeClaims func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims { return &persistentVolumeClaims{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration]( + "persistentvolumeclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{} }, + func() *v1.PersistentVolumeClaimList { return &v1.PersistentVolumeClaimList{} }), } } - -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *persistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *persistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PersistentVolumeClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *persistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). 
- Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *persistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *persistentVolumeClaims) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *persistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolumeClaim. -func (c *persistentVolumeClaims) Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *persistentVolumeClaims) ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 63122cf3fb..470b7de7bc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodsGetter has a method to return a PodInterface. @@ -43,6 +40,7 @@ type PodsGetter interface { type PodInterface interface { Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,6 +49,7 @@ type PodInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) @@ -59,209 +58,27 @@ type PodInterface interface { // pods implements PodInterface type pods struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration] } // newPods returns a Pods func newPods(c *CoreV1Client, namespace string) *pods { return &pods{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration]( + "pods", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Pod { return &v1.Pod{} }, + func() *v1.PodList { return &v1.PodList{} }), } } -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *pods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *pods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *pods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pod). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a pod and updates it. 
Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pod). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pod). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *pods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched pod. -func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pods"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied pod. -func (c *pods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } - result = &v1.Pod{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("pods"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *pods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } - - result = &v1.Pod{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("pods"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("pods"). Name(podName). SubResource("ephemeralcontainers"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go index 8b6e0e932f..a2d4d70d46 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go @@ -47,33 +47,33 @@ type PodExpansion interface { // Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). func (c *pods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error() } // Evict submits a policy/v1beta1 Eviction request to the pod's eviction subresource. // Equivalent to calling EvictV1beta1. // Deprecated: Use EvictV1() (supported in 1.22+) or EvictV1beta1(). 
func (c *pods) Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } func (c *pods) EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } func (c *pods) EvictV1(ctx context.Context, eviction *policyv1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } // Get constructs a request for getting the logs for a pod func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { - return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec) + return c.GetClient().Get().Namespace(c.GetNamespace()).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec) } // ProxyGet returns a response of the pod by calling it through the proxy. func (c *pods) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.client.Get(). - Namespace(c.ns). + request := c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("pods"). SubResource("proxy"). Name(net.JoinSchemeNamePort(scheme, name, port)). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index ff90fc0e62..060a059093 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodTemplatesGetter has a method to return a PodTemplateInterface. 
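Note on the pod expansion hunk above: Bind, Evict, GetLogs, and ProxyGet now reach the underlying REST client through the embedded generic client's GetClient() and GetNamespace() accessors, but their exported signatures are unchanged, so callers are unaffected. A minimal caller-side sketch (the pod name "my-pod" and the clientset wiring are assumptions, not part of this diff):

package main

import (
	"context"
	"fmt"
	"io"

	corev1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// streamLogsAndEvict shows that the expansion methods keep their signatures
// after the gentype migration; only their internals changed.
func streamLogsAndEvict(ctx context.Context, cs kubernetes.Interface, ns string) error {
	// GetLogs still returns a *rest.Request that can be streamed.
	req := cs.CoreV1().Pods(ns).GetLogs("my-pod", &corev1.PodLogOptions{}) // "my-pod" is a placeholder
	stream, err := req.Stream(ctx)
	if err != nil {
		return err
	}
	defer stream.Close()
	if _, err := io.Copy(io.Discard, stream); err != nil {
		return err
	}

	// EvictV1 still posts to the pod's eviction subresource.
	eviction := &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: "my-pod", Namespace: ns},
	}
	if err := cs.CoreV1().Pods(ns).EvictV1(ctx, eviction); err != nil {
		return fmt.Errorf("evict: %w", err)
	}
	return nil
}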
@@ -55,154 +52,18 @@ type PodTemplateInterface interface { // podTemplates implements PodTemplateInterface type podTemplates struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration] } // newPodTemplates returns a PodTemplates func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { return &podTemplates{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration]( + "podtemplates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.PodTemplate { return &v1.PodTemplate{} }, + func() *v1.PodTemplateList { return &v1.PodTemplateList{} }), } } - -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *podTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *podTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *podTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podTemplate). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podtemplates"). - Name(podTemplate.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podTemplate). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *podTemplates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). 
- Namespace(c.ns). - Resource("podtemplates"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podTemplate. -func (c *podTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podTemplate. -func (c *podTemplates) Apply(ctx context.Context, podTemplate *corev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodTemplate, err error) { - if podTemplate == nil { - return nil, fmt.Errorf("podTemplate provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podTemplate) - if err != nil { - return nil, err - } - name := podTemplate.Name - if name == nil { - return nil, fmt.Errorf("podTemplate.Name must be provided to Apply") - } - result = &v1.PodTemplate{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podtemplates"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index 49c75d967b..9b275ed1ba 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -20,9 +20,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/api/core/v1" @@ -30,8 +27,8 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicationControllersGetter has a method to return a ReplicationControllerInterface. @@ -44,6 +41,7 @@ type ReplicationControllersGetter interface { type ReplicationControllerInterface interface { Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (*v1.ReplicationController, error) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -52,6 +50,7 @@ type ReplicationControllerInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -61,209 +60,27 @@ type ReplicationControllerInterface interface { // replicationControllers implements ReplicationControllerInterface type replicationControllers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration] } // newReplicationControllers returns a ReplicationControllers func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers { return &replicationControllers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration]( + "replicationcontrollers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ReplicationController { return &v1.ReplicationController{} }, + func() *v1.ReplicationControllerList { return &v1.ReplicationControllerList{} }), } } -// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. -func (c *replicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *replicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ReplicationControllerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicationControllers. 
-func (c *replicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *replicationControllers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicationController. -func (c *replicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Patch(pt). 
- Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicationController. -func (c *replicationControllers) Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - result = &v1.ReplicationController{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicationControllers) ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - - result = &v1.ReplicationController{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *replicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("replicationcontrollers"). Name(replicationControllerName). SubResource("scale"). @@ -276,8 +93,8 @@ func (c *replicationControllers) GetScale(ctx context.Context, replicationContro // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *replicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("replicationcontrollers"). Name(replicationControllerName). SubResource("scale"). 
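As the replicationcontroller.go hunks above show, GetScale and UpdateScale now issue their requests via c.GetClient() and c.GetNamespace(), while the ReplicationControllerInterface they implement is untouched. A minimal sketch of scaling through those subresource methods (the controller name "my-rc" and the replica count are assumptions):

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleRC reads the scale subresource of a replication controller and
// updates it, using the same typed methods as before the migration.
func scaleRC(ctx context.Context, cs kubernetes.Interface, ns string, replicas int32) error {
	rcClient := cs.CoreV1().ReplicationControllers(ns)

	scale, err := rcClient.GetScale(ctx, "my-rc", metav1.GetOptions{}) // "my-rc" is a placeholder
	if err != nil {
		return err
	}

	scale.Spec.Replicas = replicas
	_, err = rcClient.UpdateScale(ctx, "my-rc", scale, metav1.UpdateOptions{})
	return err
}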
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 8444d164ed..4b2dcd3b59 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ResourceQuotasGetter has a method to return a ResourceQuotaInterface. @@ -43,6 +40,7 @@ type ResourceQuotasGetter interface { type ResourceQuotaInterface interface { Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (*v1.ResourceQuota, error) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type ResourceQuotaInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) ResourceQuotaExpansion } // resourceQuotas implements ResourceQuotaInterface type resourceQuotas struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration] } // newResourceQuotas returns a ResourceQuotas func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { return &resourceQuotas{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration]( + "resourcequotas", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ResourceQuota { return &v1.ResourceQuota{} }, + func() *v1.ResourceQuotaList { return &v1.ResourceQuotaList{} }), } } - -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *resourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. 
-func (c *resourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ResourceQuotaList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *resourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *resourceQuotas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceQuota. 
-func (c *resourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceQuota. -func (c *resourceQuotas) Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - result = &v1.ResourceQuota{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourcequotas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *resourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - - result = &v1.ResourceQuota{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourcequotas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 4aba330381..12a8d1178f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SecretsGetter has a method to return a SecretInterface. 
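The secrets client migrated below keeps its Apply method through the embedded ClientWithListAndApply, so server-side apply with the generated apply configurations continues to work as before. A minimal sketch, where the secret name, field manager, and data values are placeholders:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applySecret uses server-side apply via the typed Secrets client; the
// generic client marshals the apply configuration and issues the
// apply-patch request internally.
func applySecret(ctx context.Context, cs kubernetes.Interface, ns string) error {
	cfg := applycorev1.Secret("demo-credentials", ns). // name is a placeholder
		WithStringData(map[string]string{"token": "not-a-real-token"})

	_, err := cs.CoreV1().Secrets(ns).Apply(ctx, cfg, metav1.ApplyOptions{
		FieldManager: "example-manager",
		Force:        true,
	})
	return err
}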
@@ -55,154 +52,18 @@ type SecretInterface interface { // secrets implements SecretInterface type secrets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration] } // newSecrets returns a Secrets func newSecrets(c *CoreV1Client, namespace string) *secrets { return &secrets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration]( + "secrets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Secret { return &v1.Secret{} }, + func() *v1.SecretList { return &v1.SecretList{} }), } } - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.SecretList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *secrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Post(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(secret). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Put(). - Namespace(c.ns). - Resource("secrets"). - Name(secret.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(secret). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *secrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *secrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("secrets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied secret. -func (c *secrets) Apply(ctx context.Context, secret *corev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Secret, err error) { - if secret == nil { - return nil, fmt.Errorf("secret provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(secret) - if err != nil { - return nil, err - } - name := secret.Name - if name == nil { - return nil, fmt.Errorf("secret.Name must be provided to Apply") - } - result = &v1.Secret{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("secrets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index 3fe22ba444..ec935a3247 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ServicesGetter has a method to return a ServiceInterface. @@ -43,6 +40,7 @@ type ServicesGetter interface { type ServiceInterface interface { Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) @@ -50,190 +48,25 @@ type ServiceInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) ServiceExpansion } // services implements ServiceInterface type services struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration] } // newServices returns a Services func newServices(c *CoreV1Client, namespace string) *services { return &services{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration]( + "services", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Service { return &v1.Service{} }, + func() *v1.ServiceList { return &v1.ServiceList{} }), } } - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a service and updates it. 
Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *services) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched service. -func (c *services) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("services"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied service. -func (c *services) Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - result = &v1.Service{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("services"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *services) ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - - result = &v1.Service{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("services"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go index 4937fd1a39..9a6f781387 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go @@ -28,8 +28,8 @@ type ServiceExpansion interface { // ProxyGet returns a response of the service by calling it through the proxy. func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.client.Get(). - Namespace(c.ns). + request := c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("services"). SubResource("proxy"). Name(net.JoinSchemeNamePort(scheme, name, port)). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index bdf589b960..eb995d4548 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -20,9 +20,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" authenticationv1 "k8s.io/api/authentication/v1" v1 "k8s.io/api/core/v1" @@ -30,8 +27,8 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ServiceAccountsGetter has a method to return a ServiceAccountInterface. @@ -58,163 +55,27 @@ type ServiceAccountInterface interface { // serviceAccounts implements ServiceAccountInterface type serviceAccounts struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration] } // newServiceAccounts returns a ServiceAccounts func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { return &serviceAccounts{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration]( + "serviceaccounts", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ServiceAccount { return &v1.ServiceAccount{} }, + func() *v1.ServiceAccountList { return &v1.ServiceAccountList{} }), } } -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *serviceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *serviceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ServiceAccountList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *serviceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Post(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceAccount). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Put(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(serviceAccount.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceAccount). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. -func (c *serviceAccounts) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched serviceAccount. -func (c *serviceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceAccount. 
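The hunks above for secrets, services and serviceaccounts all follow the same shape: the hand-written Get/List/Watch/Create/Update/Delete/Patch/Apply plumbing is deleted, and the struct instead embeds gentype.ClientWithListAndApply built from the resource name, the REST client, scheme.ParameterCodec, the namespace and two factory functions. The public interfaces are unchanged, so callers compile exactly as before. A minimal consumer sketch, assuming a reachable cluster and a hypothetical kubeconfig path:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig location; any rest.Config works the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Get is now served by the embedded gentype client instead of the deleted
	// hand-written REST call, but the call site is identical.
	sa, err := cs.CoreV1().ServiceAccounts("default").Get(context.TODO(), "default", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(sa.Name)
}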
-func (c *serviceAccounts) Apply(ctx context.Context, serviceAccount *corev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ServiceAccount, err error) { - if serviceAccount == nil { - return nil, fmt.Errorf("serviceAccount provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceAccount) - if err != nil { - return nil, err - } - name := serviceAccount.Name - if name == nil { - return nil, fmt.Errorf("serviceAccount.Name must be provided to Apply") - } - result = &v1.ServiceAccount{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. func (c *serviceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { result = &authenticationv1.TokenRequest{} - err = c.client.Post(). - Namespace(c.ns). + err = c.GetClient().Post(). + Namespace(c.GetNamespace()). Resource("serviceaccounts"). Name(serviceAccountName). SubResource("token"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go index 63e616b033..1f927055cc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" discoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointSlicesGetter has a method to return a EndpointSliceInterface. @@ -55,154 +52,18 @@ type EndpointSliceInterface interface { // endpointSlices implements EndpointSliceInterface type endpointSlices struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration] } // newEndpointSlices returns a EndpointSlices func newEndpointSlices(c *DiscoveryV1Client, namespace string) *endpointSlices { return &endpointSlices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration]( + "endpointslices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.EndpointSlice { return &v1.EndpointSlice{} }, + func() *v1.EndpointSliceList { return &v1.EndpointSliceList{} }), } } - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *endpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *endpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EndpointSliceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *endpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpointslices"). - Name(endpointSlice.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *endpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. -func (c *endpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - result = &v1.EndpointSlice{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpointslices"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go index d159b5ea9e..6bbbde82ec 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go @@ -44,22 +44,24 @@ var endpointslicesKind = v1.SchemeGroupVersion.WithKind("EndpointSlice") // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) { + emptyResult := &v1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1.EndpointSlice{}) + Invokes(testing.NewGetActionWithOptions(endpointslicesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EndpointSlice), err } // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. func (c *FakeEndpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) { + emptyResult := &v1.EndpointSliceList{} obj, err := c.Fake. - Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1.EndpointSliceList{}) + Invokes(testing.NewListActionWithOptions(endpointslicesResource, endpointslicesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEndpointSlices) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested endpointSlices. func (c *FakeEndpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(endpointslicesResource, c.ns, opts)) } // Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) { + emptyResult := &v1.EndpointSlice{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{}) + Invokes(testing.NewCreateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EndpointSlice), err } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) { + emptyResult := &v1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{}) + Invokes(testing.NewUpdateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EndpointSlice), err } @@ -114,7 +118,7 @@ func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts metav // DeleteCollection deletes a collection of objects. func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(endpointslicesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EndpointSliceList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts metav1.D // Patch applies the patch and returns the patched endpointSlice. func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) { + emptyResult := &v1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1.EndpointSlice{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EndpointSlice), err } @@ -144,11 +149,12 @@ func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *discovery if name == nil { return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") } + emptyResult := &v1.EndpointSlice{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data), &v1.EndpointSlice{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EndpointSlice), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go index 2ade833029..298cfbc879 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/discovery/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" discoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointSlicesGetter has a method to return a EndpointSliceInterface. @@ -55,154 +52,18 @@ type EndpointSliceInterface interface { // endpointSlices implements EndpointSliceInterface type endpointSlices struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration] } // newEndpointSlices returns a EndpointSlices func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices { return &endpointSlices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration]( + "endpointslices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.EndpointSlice { return &v1beta1.EndpointSlice{} }, + func() *v1beta1.EndpointSliceList { return &v1beta1.EndpointSliceList{} }), } } - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *endpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *endpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.EndpointSliceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. 
-func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpointslices"). - Name(endpointSlice.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *endpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. 
-func (c *endpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - result = &v1beta1.EndpointSlice{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpointslices"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go index 2683718113..65cf69b9dc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go @@ -44,22 +44,24 @@ var endpointslicesKind = v1beta1.SchemeGroupVersion.WithKind("EndpointSlice") // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { + emptyResult := &v1beta1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{}) + Invokes(testing.NewGetActionWithOptions(endpointslicesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.EndpointSlice), err } // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { + emptyResult := &v1beta1.EndpointSliceList{} obj, err := c.Fake. - Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1beta1.EndpointSliceList{}) + Invokes(testing.NewListActionWithOptions(endpointslicesResource, endpointslicesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested endpointSlices. func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(endpointslicesResource, c.ns, opts)) } // Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { + emptyResult := &v1beta1.EndpointSlice{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) + Invokes(testing.NewCreateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.EndpointSlice), err } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { + emptyResult := &v1beta1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) + Invokes(testing.NewUpdateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.EndpointSlice), err } @@ -114,7 +118,7 @@ func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts v1.De // DeleteCollection deletes a collection of objects. func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(endpointslicesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.EndpointSliceList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts v1.Delet // Patch applies the patch and returns the patched endpointSlice. func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { + emptyResult := &v1beta1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1beta1.EndpointSlice{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.EndpointSlice), err } @@ -144,11 +149,12 @@ func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *discovery if name == nil { return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") } + emptyResult := &v1beta1.EndpointSlice{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.EndpointSlice{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.EndpointSlice), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go index c9f2bbed50..d021a76c4e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/events/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" eventsv1 "k8s.io/client-go/applyconfigurations/events/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -55,154 +52,18 @@ type EventInterface interface { // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *EventsV1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Event { return &v1.Event{} }, + func() *v1.EventList { return &v1.EventList{} }), } } - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. 
-func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *events) Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go index 0928781f1e..1e79eb9845 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go @@ -44,22 +44,24 @@ var eventsKind = v1.SchemeGroupVersion.WithKind("Event") // Get takes name of the event, and returns the corresponding event object, and an error if there is any. func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) + Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { + emptyResult := &v1.EventList{} obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1.EventList{}) + Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result // Watch returns a watch.Interface that watches the requested events. func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts)) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) + Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) + Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } @@ -114,7 +118,7 @@ func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.Delete // DeleteCollection deletes a collection of objects. func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EventList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOpt // Patch applies the patch and returns the patched event. func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } @@ -144,11 +149,12 @@ func (c *FakeEvents) Apply(ctx context.Context, event *eventsv1.EventApplyConfig if name == nil { return nil, fmt.Errorf("event.Name must be provided to Apply") } + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index dfdf8b8979..77ca2e7756 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" eventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -55,154 +52,18 @@ type EventInterface interface { // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *EventsV1beta1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Event { return &v1beta1.Event{} }, + func() *v1beta1.EventList { return &v1beta1.EventList{} }), } } - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. 
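The fake EndpointSlice and Event clients above change in lockstep: every recorded action now carries the caller's options (the testing.New*ActionWithOptions constructors), and error paths return a typed empty object instead of nil. Tests that go through the public fake clientset are unaffected; a sketch, with made-up object names:

package example

import (
	"context"
	"testing"

	eventsv1 "k8s.io/api/events/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// The fake clientset now records Create with the caller's CreateOptions under
// the hood, but Create/Get themselves behave exactly as before this bump.
func TestFakeEventRoundTrip(t *testing.T) {
	cs := fake.NewSimpleClientset()

	ev := &eventsv1.Event{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	if _, err := cs.EventsV1().Events("default").Create(context.TODO(), ev, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	got, err := cs.EventsV1().Events("default").Get(context.TODO(), "demo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "demo" {
		t.Fatalf("unexpected event %q", got.Name)
	}
}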
-func (c *events) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *events) Apply(ctx context.Context, event *eventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1beta1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index 562f8d5e45..4ddbaa31af 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -44,11 +44,11 @@ type EventExpansion interface { // it must either match this event client's namespace, or this event client must // have been created with the "" namespace. func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Post(). + err := e.GetClient().Post(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Body(event). @@ -64,11 +64,11 @@ func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // created with the "" namespace. // Update also requires the ResourceVersion to be set in the event object. func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Put(). + err := e.GetClient().Put(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). @@ -84,11 +84,11 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // The namespace must either match this event client's namespace, or this event client must // have been created with the "" namespace. func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Patch(types.StrategicMergePatchType). + err := e.GetClient().Patch(types.StrategicMergePatchType). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go index 522b4dc063..b00f2126a5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go @@ -44,22 +44,24 @@ var eventsKind = v1beta1.SchemeGroupVersion.WithKind("Event") // Get takes name of the event, and returns the corresponding event object, and an error if there is any. 
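Expansion files such as service_expansion.go and event_expansion.go above are the only hand-written code left in these packages, and they now reach the REST client and namespace through the GetClient()/GetNamespace() accessors of the embedded generic client rather than the removed client/ns fields. Their exported signatures are untouched, so a caller like the hedged sketch below (event name and namespace are placeholders) keeps working:

package example

import (
	eventsv1beta1 "k8s.io/api/events/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// CreateWithEventNamespace is part of the EventExpansion interface rewritten
// above; only its body changed to use GetClient()/GetNamespace().
func recordDemoEvent(cs kubernetes.Interface) (*eventsv1beta1.Event, error) {
	ev := &eventsv1beta1.Event{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-event", Namespace: "default"},
	}
	return cs.EventsV1beta1().Events("default").CreateWithEventNamespace(ev)
}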
func (c *FakeEvents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) { + emptyResult := &v1beta1.Event{} obj, err := c.Fake. - Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1beta1.Event{}) + Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Event), err } // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) { + emptyResult := &v1beta1.EventList{} obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1beta1.EventList{}) + Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *v1b // Watch returns a watch.Interface that watches the requested events. func (c *FakeEvents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts)) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { + emptyResult := &v1beta1.Event{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1beta1.Event{}) + Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Event), err } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { + emptyResult := &v1beta1.Event{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1beta1.Event{}) + Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Event), err } @@ -114,7 +118,7 @@ func (c *FakeEvents) Delete(ctx context.Context, name string, opts v1.DeleteOpti // DeleteCollection deletes a collection of objects. func (c *FakeEvents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.EventList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEvents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions // Patch applies the patch and returns the patched event. func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { + emptyResult := &v1beta1.Event{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1beta1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Event), err } @@ -144,11 +149,12 @@ func (c *FakeEvents) Apply(ctx context.Context, event *eventsv1beta1.EventApplyC if name == nil { return nil, fmt.Errorf("event.Name must be provided to Apply") } + emptyResult := &v1beta1.Event{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Event), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index ffe219fdaa..f86194bf05 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -43,6 +40,7 @@ type DaemonSetsGetter interface { type DaemonSetInterface interface { Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (*v1beta1.DaemonSet, error) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DaemonSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.DaemonSet { return &v1beta1.DaemonSet{} }, + func() *v1beta1.DaemonSetList { return &v1beta1.DaemonSetList{} }), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *daemonSets) Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1beta1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1beta1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). 
- Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index c41d8dbc26..021fbb3b3b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -22,15 +22,14 @@ import ( "context" json "encoding/json" "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -43,6 +42,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,6 +51,7 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) @@ -61,209 +62,27 @@ type DeploymentInterface interface { // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Deployment { return &v1beta1.Deployment{} }, + func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }), } } -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. 
-func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any. func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -276,8 +95,8 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -301,8 +120,8 @@ func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, sca } result = &v1beta1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go index 5c409ac996..bd75b8a38e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go @@ -31,5 +31,5 @@ type DeploymentExpansion interface { // Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. func (c *deployments) Rollback(ctx context.Context, deploymentRollback *v1beta1.DeploymentRollback, opts metav1.CreateOptions) error { - return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("rollback").Body(deploymentRollback).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("deployments").Name(deploymentRollback.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("rollback").Body(deploymentRollback).Do(ctx).Error() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go index abe3d2da1f..f14943082d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -44,22 +44,24 @@ var daemonsetsKind = v1beta1.SchemeGroupVersion.WithKind("DaemonSet") // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) + Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { + emptyResult := &v1beta1.DaemonSetList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta1.DaemonSetList{}) + Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested daemonSets. func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts)) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } @@ -126,7 +131,7 @@ func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.Delete // DeleteCollection deletes a collection of objects. 
func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOpt // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } @@ -156,11 +162,12 @@ func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *extensionsv1beta1 if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } @@ -179,11 +186,12 @@ func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *extensionsv if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go index e399361a92..b81d4a96c4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -44,22 +44,24 @@ var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + emptyResult := &v1beta1.DeploymentList{} obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{}) + Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested deployments. func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -126,7 +131,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -156,11 +162,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *extensionsv1bet if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -179,33 +186,36 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *extension if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &v1beta1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(deploymentsResource, c.ns, "scale", deploymentName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &v1beta1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "scale", c.ns, scale, opts), &v1beta1.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } @@ -220,11 +230,12 @@ func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, if err != nil { return nil, err } + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, "status"), &v1beta1.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go index 48ae51e80d..ae95682fc1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -44,22 +44,24 @@ var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress") // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // List takes label and field selectors, and returns the list of Ingresses that match those selectors. func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + emptyResult := &v1beta1.IngressList{} obj, err := c.Fake. - Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{}) + Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result * // Watch returns a watch.Interface that watches the requested ingresses. 
func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts)) } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) { +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -126,7 +131,7 @@ func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteO // DeleteCollection deletes a collection of objects. func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) return err @@ -134,11 +139,12 @@ func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOpti // Patch applies the patch and returns the patched ingress. func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -156,11 +162,12 @@ func (c *FakeIngresses) Apply(ctx context.Context, ingress *extensionsv1beta1.In if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -179,11 +186,12 @@ func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *extensionsv1be if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go index a32022140a..d829a0c638 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go @@ -44,22 +44,24 @@ var networkpoliciesKind = v1beta1.SchemeGroupVersion.WithKind("NetworkPolicy") // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { + emptyResult := &v1beta1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1beta1.NetworkPolicy{}) + Invokes(testing.NewGetActionWithOptions(networkpoliciesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.NetworkPolicy), err } // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { + emptyResult := &v1beta1.NetworkPolicyList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1beta1.NetworkPolicyList{}) + Invokes(testing.NewListActionWithOptions(networkpoliciesResource, networkpoliciesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (re // Watch returns a watch.Interface that watches the requested networkPolicies. func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(networkpoliciesResource, c.ns, opts)) } // Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { + emptyResult := &v1beta1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{}) + Invokes(testing.NewCreateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.NetworkPolicy), err } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { + emptyResult := &v1beta1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{}) + Invokes(testing.NewUpdateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.NetworkPolicy), err } @@ -114,7 +118,7 @@ func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts v1.D // DeleteCollection deletes a collection of objects. func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(networkpoliciesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.NetworkPolicyList{}) return err @@ -122,11 +126,12 @@ func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.Dele // Patch applies the patch and returns the patched networkPolicy. func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { + emptyResult := &v1beta1.NetworkPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1beta1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.NetworkPolicy), err } @@ -144,11 +149,12 @@ func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *extensio if name == nil { return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") } + emptyResult := &v1beta1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.NetworkPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go index 42da6fa8b6..5d94ba73b0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -44,22 +44,24 @@ var replicasetsKind = v1beta1.SchemeGroupVersion.WithKind("ReplicaSet") // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) + Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + emptyResult := &v1beta1.ReplicaSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta1.ReplicaSetList{}) + Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested replicaSets. func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts)) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } @@ -126,7 +131,7 @@ func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } @@ -156,11 +162,12 @@ func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *extensionsv1bet if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } @@ -179,33 +186,36 @@ func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *extension if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } // GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &v1beta1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(replicasetsResource, c.ns, "scale", replicaSetName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &v1beta1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "scale", c.ns, scale, opts), &v1beta1.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } @@ -220,11 +230,12 @@ func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, if err != nil { return nil, err } + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, "status"), &v1beta1.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index dd4012cc23..4511c93fc2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -43,6 +40,7 @@ type IngressesGetter interface { type IngressInterface interface { Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type IngressInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Ingress { return &v1beta1.Ingress{} }, + func() *v1beta1.IngressList { return &v1beta1.IngressList{} }), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. 
-func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). 
- Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *ingresses) Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go index 978b26db03..afa8203c3d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. 
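> Reviewer note (not part of the vendored diff): the hunks immediately above and below repeat the pattern already applied to ingress.go — the hand-written REST plumbing for networkPolicies is deleted and the struct embeds the shared generic client from k8s.io/client-go/gentype. The important property for consumers is that the public typed interface is unchanged. The sketch below is a hypothetical caller (the kubeconfig path, the "default" namespace, and the error handling are assumptions); since the extensions/v1beta1 API group was removed from current API servers long ago, it is illustrative of the client surface only.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig at the default location (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Same call shape as before this refactor; List/Get/etc. are now served by the
	// gentype.ClientWithListAndApply embedded in the generated networkPolicies struct.
	nps, err := clientset.ExtensionsV1beta1().NetworkPolicies("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		fmt.Println("list failed (expected against current clusters; the group is gone server-side):", err)
		return
	}
	fmt.Println("network policies:", len(nps.Items))
}
```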
@@ -55,154 +52,18 @@ type NetworkPolicyInterface interface { // networkPolicies implements NetworkPolicyInterface type networkPolicies struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration] } // newNetworkPolicies returns a NetworkPolicies func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPolicies { return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration]( + "networkpolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.NetworkPolicy { return &v1beta1.NetworkPolicy{} }, + func() *v1beta1.NetworkPolicyList { return &v1beta1.NetworkPolicyList{} }), } } - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). 
- Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *networkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. -func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - result = &v1beta1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 3c907a3a04..8973948f39 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -22,15 +22,14 @@ import ( "context" json "encoding/json" "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. @@ -43,6 +42,7 @@ type ReplicaSetsGetter interface { type ReplicaSetInterface interface { Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (*v1beta1.ReplicaSet, error) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,6 +51,7 @@ type ReplicaSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) @@ -61,209 +62,27 @@ type ReplicaSetInterface interface { // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.ReplicaSet { return &v1beta1.ReplicaSet{} }, + func() *v1beta1.ReplicaSetList { return &v1beta1.ReplicaSetList{} }), } } -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. 
-func (c *replicaSets) Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any. func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -276,8 +95,8 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -301,8 +120,8 @@ func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, sca } result = &v1beta1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). 
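> Reviewer note (not part of the vendored diff): the fake typed clients in this diff (extensions/v1beta1 above, flowcontrol/v1 below) switch every recorded action to the new `New*ActionWithOptions` constructors, so the options a caller passes are threaded into the fake's action machinery instead of being dropped, and the "empty result" is now returned on a nil object instead of nil. Tests built on the fake clientset keep the same surface. The sketch below is a minimal, hypothetical example (the seeded "exempt" FlowSchema and the printed output are assumptions): seed objects, call the typed client, then assert on recorded actions by verb and resource as before.

```go
package main

import (
	"context"
	"fmt"

	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed the fake clientset with a FlowSchema so Get has something to return.
	seed := &flowcontrolv1.FlowSchema{ObjectMeta: metav1.ObjectMeta{Name: "exempt"}}
	clientset := fake.NewSimpleClientset(seed)

	fs, err := clientset.FlowcontrolV1().FlowSchemas().Get(context.TODO(), "exempt", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("got:", fs.Name)

	// The recorded action is still a root "get" on flowschemas; only its constructor
	// (NewRootGetActionWithOptions, as in the generated fakes in this diff) changed.
	for _, a := range clientset.Actions() {
		fmt.Printf("recorded: %s %s\n", a.GetVerb(), a.GetResource().Resource)
	}
}
```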
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go index 922a60d89b..bf2b63fb2d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go @@ -43,20 +43,22 @@ var flowschemasKind = v1.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1.FlowSchema{}) + Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. func (c *FakeFlowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { + emptyResult := &v1.FlowSchemaList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1.FlowSchemaList{}) + Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts metav1.ListOptions) (re // Watch returns a watch.Interface that watches the requested flowSchemas. func (c *FakeFlowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1.FlowSchema{}) + Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1.FlowSchema{}) + Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1.FlowSchema{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts metav1.D // DeleteCollection deletes a collection of objects. func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.FlowSchemaList{}) return err @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts metav1.Dele // Patch applies the patch and returns the patched flowSchema. func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.F if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go index 27d9586748..053de56ed1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1.SchemeGroupVersion.WithKind("PriorityLe // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { + emptyResult := &v1.PriorityLevelConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1.PriorityLevelConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts metav1. // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PriorityLevelConfigurationList{}) return err @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched priorityLevelConfiguration. func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go index bd36c5e6a4..2606cee070 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -43,6 +40,7 @@ type FlowSchemasGetter interface { type FlowSchemaInterface interface { Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (*v1.FlowSchema, error) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type FlowSchemaInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.FlowSchema { return &v1.FlowSchema{} }, + func() *v1.FlowSchemaList { return &v1.FlowSchemaList{} }), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go index 797fe94035..64907af606 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface { type PriorityLevelConfigurationInterface interface { Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*v1.PriorityLevelConfiguration, error) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.PriorityLevelConfiguration { return &v1.PriorityLevelConfiguration{} }, + func() *v1.PriorityLevelConfigurationList { return &v1.PriorityLevelConfigurationList{} }), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go index be7a7e390f..8b4435a8ad 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go @@ -43,20 +43,22 @@ var flowschemasKind = v1beta1.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { + emptyResult := &v1beta1.FlowSchemaList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta1.FlowSchemaList{}) + Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested flowSchemas. func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. 
func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.FlowSchemaList{}) return err @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched flowSchema. func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1be if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go index 698a168b37..e139e4dceb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("Prior // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { + emptyResult := &v1beta1.PriorityLevelConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta1.PriorityLevelConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.PriorityLevelConfigurationList{}) return err @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched priorityLevelConfiguration. func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go index a9d38becf9..3c6805b9bc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -43,6 +40,7 @@ type FlowSchemasGetter interface { type FlowSchemaInterface interface { Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (*v1beta1.FlowSchema, error) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type FlowSchemaInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
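A brief aside on the fake client hunks above (illustrative commentary, not part of the patch): the generated fakes now build their recorded actions with the *WithOptions constructors, so the Get/List/Create/Update/Patch options a test passes in are attached to the recorded action rather than being discarded. A minimal sketch of how this surfaces in a test, assuming the standard fake clientset from k8s.io/client-go/kubernetes/fake:

package example_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeRecordsListAction(t *testing.T) {
	cs := fake.NewSimpleClientset()
	// The call site is unchanged; only the action the fake records internally differs.
	if _, err := cs.FlowcontrolV1beta1().FlowSchemas().List(context.TODO(), metav1.ListOptions{LabelSelector: "app=demo"}); err != nil {
		t.Fatal(err)
	}
	if got := len(cs.Actions()); got != 1 {
		t.Fatalf("expected exactly one recorded action, got %d", got)
	}
}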
ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta1Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.FlowSchema { return &v1beta1.FlowSchema{} }, + func() *v1beta1.FlowSchemaList { return &v1beta1.FlowSchemaList{} }), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1beta1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1beta1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go index 41f35cbccd..049f4049d6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface { type PriorityLevelConfigurationInterface interface { Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta1.PriorityLevelConfiguration, error) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
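Another aside (again illustrative rather than part of the patch): the typed clients in these files drop their hand-rolled REST plumbing and instead embed gentype.ClientWithListAndApply, which supplies Get, List, Watch, Create, Update, UpdateStatus, Delete, DeleteCollection, Patch, Apply, and ApplyStatus generically. Call sites are unaffected; a minimal caller-side sketch, assuming a hypothetical kubeconfig path:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig location; substitute your own.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// After this change the request is served by the embedded generic client,
	// but the typed interface and the call look exactly as they did before.
	schemas, err := cs.FlowcontrolV1beta1().FlowSchemas().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, fs := range schemas.Items {
		fmt.Println(fs.Name)
	}
}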
ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.PriorityLevelConfiguration { return &v1beta1.PriorityLevelConfiguration{} }, + func() *v1beta1.PriorityLevelConfigurationList { return &v1beta1.PriorityLevelConfigurationList{} }), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go index 7ce6d2116b..41cad9b7a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go @@ -43,20 +43,22 @@ var flowschemasKind = v1beta2.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) { + emptyResult := &v1beta2.FlowSchemaList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta2.FlowSchemaList{}) + Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested flowSchemas. func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) { + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. 
func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.FlowSchemaList{}) return err @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched flowSchema. func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) { + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1be if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go index 7340f8a09e..f9eac85d51 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1beta2.SchemeGroupVersion.WithKind("Prior // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) { + emptyResult := &v1beta2.PriorityLevelConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta2.PriorityLevelConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.PriorityLevelConfigurationList{}) return err @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched priorityLevelConfiguration. func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go index 3a1f12b6a2..2706157628 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -43,6 +40,7 @@ type FlowSchemasGetter interface { type FlowSchemaInterface interface { Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (*v1beta2.FlowSchema, error) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type FlowSchemaInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta2Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta2.FlowSchema { return &v1beta2.FlowSchema{} }, + func() *v1beta2.FlowSchemaList { return &v1beta2.FlowSchemaList{} }), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1beta2.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1beta2.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go index f028869f17..00ead4c60d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface { type PriorityLevelConfigurationInterface interface { Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta2.PriorityLevelConfiguration, error) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta2Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta2.PriorityLevelConfiguration { return &v1beta2.PriorityLevelConfiguration{} }, + func() *v1beta2.PriorityLevelConfigurationList { return &v1beta2.PriorityLevelConfigurationList{} }), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go index 1371f6ed67..70dca796a4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go @@ -43,20 +43,22 @@ var flowschemasKind = v1beta3.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.FlowSchema, err error) { + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.FlowSchemaList, err error) { + emptyResult := &v1beta3.FlowSchemaList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta3.FlowSchemaList{}) + Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested flowSchemas. func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (result *v1beta3.FlowSchema, err error) { + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. 
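The regenerated v1beta3 fakes above now pass the caller's request options into the recorded actions (the `*WithOptions` constructors) and, when a reaction yields no object, return a typed empty result instead of `nil` alongside the error. Below is a minimal sketch of what that looks like from test code, assuming the standard fake clientset and an injected reactor; the error text is invented for the example.

```go
package main

import (
	"context"
	"errors"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	cs := fake.NewSimpleClientset()

	// Force every Get on flowschemas to fail so the error path is visible.
	cs.PrependReactor("get", "flowschemas", func(action k8stesting.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("simulated failure")
	})

	// With the regenerated fakes in this diff, fs is a non-nil empty
	// &v1beta3.FlowSchema{} on error, so tests should branch on err rather
	// than on a nil check of the result.
	fs, err := cs.FlowcontrolV1beta3().FlowSchemas().Get(context.TODO(), "anything", metav1.GetOptions{})
	fmt.Printf("err=%v nonNil=%t name=%q\n", err, fs != nil, fs.GetName())
}
```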
func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta3.FlowSchemaList{}) return err @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched flowSchema. func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) { + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1be if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go index a0e266fecb..45836a6453 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1beta3.SchemeGroupVersion.WithKind("Prior // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.PriorityLevelConfigurationList, err error) { + emptyResult := &v1beta3.PriorityLevelConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta3.PriorityLevelConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
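As with the FlowSchema fake, the PriorityLevelConfiguration fake now records the caller's UpdateOptions (and other request options) on the actions it logs, while subresource handling is unchanged. A short sketch of exercising the status subresource against the fake and inspecting the recorded actions; the object name "demo" is arbitrary.

```go
package main

import (
	"context"
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed the fake with one cluster-scoped object.
	cs := fake.NewSimpleClientset(&flowcontrolv1beta3.PriorityLevelConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
	})
	ctx := context.TODO()

	plc, err := cs.FlowcontrolV1beta3().PriorityLevelConfigurations().Get(ctx, "demo", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// Exercise the status subresource; the fake records it like any other call.
	if _, err := cs.FlowcontrolV1beta3().PriorityLevelConfigurations().UpdateStatus(ctx, plc, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}

	// The second entry should show verb "update" with subresource "status".
	for _, a := range cs.Actions() {
		fmt.Println(a.GetVerb(), a.GetResource().Resource, a.GetSubresource())
	}
}
```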
-func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta3.PriorityLevelConfigurationList{}) return err @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched priorityLevelConfiguration. func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go index 5fa39d6baf..35f600cdf4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go @@ -20,17 +20,14 @@ package v1beta3 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -43,6 +40,7 @@ type FlowSchemasGetter interface { type FlowSchemaInterface interface { Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (*v1beta3.FlowSchema, error) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type FlowSchemaInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta3Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta3.FlowSchema { return &v1beta3.FlowSchema{} }, + func() *v1beta3.FlowSchemaList { return &v1beta3.FlowSchemaList{} }), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta3.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
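The typed v1beta3 FlowSchema client above drops its hand-written REST plumbing and embeds gentype.ClientWithListAndApply, but the exported FlowSchemaInterface is unchanged, so code written against that interface keeps compiling and behaving the same. A sketch of that caller-side stability, using the fake clientset so it runs without a cluster; the object name "demo" is arbitrary.

```go
package main

import (
	"context"
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	typedflowcontrolv1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3"
)

// listSchemas depends only on the public FlowSchemaInterface, which the
// gentype rewrite leaves untouched, so this compiles before and after the bump.
func listSchemas(ctx context.Context, c typedflowcontrolv1beta3.FlowSchemaInterface) ([]string, error) {
	list, err := c.List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(list.Items))
	for _, fs := range list.Items {
		names = append(names, fs.Name)
	}
	return names, nil
}

func main() {
	cs := fake.NewSimpleClientset(&flowcontrolv1beta3.FlowSchema{ObjectMeta: metav1.ObjectMeta{Name: "demo"}})
	names, err := listSchemas(context.TODO(), cs.FlowcontrolV1beta3().FlowSchemas())
	fmt.Println(names, err)
}
```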
-func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1beta3.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1beta3.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go index 49f05257c9..93842e0cf0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -20,17 +20,14 @@ package v1beta3 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface { type PriorityLevelConfigurationInterface interface { Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta3.PriorityLevelConfiguration, error) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta3Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta3.PriorityLevelConfiguration { return &v1beta3.PriorityLevelConfiguration{} }, + func() *v1beta3.PriorityLevelConfigurationList { return &v1beta3.PriorityLevelConfigurationList{} }), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta3.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
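Apply and ApplyStatus, previously hand-rolled below via json.Marshal and an apply patch, are now served by the embedded generic client; the caller-facing signature (an apply configuration plus ApplyOptions) is unchanged. A hedged sketch of driving Apply against a real cluster follows: it assumes a reachable cluster and a kubeconfig at the default path, and the resource name "demo-priority-level" and field manager "example-manager" are invented for the example.

```go
package main

import (
	"context"
	"flag"
	"fmt"
	"path/filepath"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolv1beta3ac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

func main() {
	kubeconfig := flag.String("kubeconfig", filepath.Join(homedir.HomeDir(), ".kube", "config"), "path to kubeconfig")
	flag.Parse()

	cfg, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Declare only the fields this manager owns; the apply configuration is
	// sent as an apply patch, just as the generated Apply method does.
	plc := flowcontrolv1beta3ac.PriorityLevelConfiguration("demo-priority-level").
		WithSpec(flowcontrolv1beta3ac.PriorityLevelConfigurationSpec().
			WithType(flowcontrolv1beta3.PriorityLevelEnablementExempt))

	applied, err := cs.FlowcontrolV1beta3().PriorityLevelConfigurations().
		Apply(context.TODO(), plc, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	fmt.Println(applied, err)
}
```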
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go index 002de0dd8a..a9693338b5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go @@ -44,22 +44,24 @@ var ingressesKind = v1.SchemeGroupVersion.WithKind("Ingress") // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. func (c *FakeIngresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) { + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1.Ingress{}) + Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } // List takes label and field selectors, and returns the list of Ingresses that match those selectors. func (c *FakeIngresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) { + emptyResult := &v1.IngressList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1.IngressList{}) + Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeIngresses) List(ctx context.Context, opts metav1.ListOptions) (resu // Watch returns a watch.Interface that watches the requested ingresses. func (c *FakeIngresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts)) } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) { + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1.Ingress{}) + Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1.Ingress{}) + Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) { +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1.Ingress{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } @@ -126,7 +131,7 @@ func (c *FakeIngresses) Delete(ctx context.Context, name string, opts metav1.Del // DeleteCollection deletes a collection of objects. func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.IngressList{}) return err @@ -134,11 +139,12 @@ func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts metav1.Delete // Patch applies the patch and returns the patched ingress. 
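The fake Ingress client's Patch now records the caller's PatchOptions via NewPatchSubresourceActionWithOptions, while patching through the fake object tracker behaves as before. A small sketch, assuming the fake clientset and a merge patch; "web", "default", and the annotation value are placeholders.

```go
package main

import (
	"context"
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed the fake with one namespaced Ingress.
	cs := fake.NewSimpleClientset(&networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	})

	// A merge patch adding an annotation; the fake tracker applies it in memory.
	patch := []byte(`{"metadata":{"annotations":{"owner":"platform"}}}`)
	ing, err := cs.NetworkingV1().Ingresses("default").Patch(
		context.TODO(), "web", types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(ing.Annotations["owner"]) // platform
}
```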
func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) { + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } @@ -156,11 +162,12 @@ func (c *FakeIngresses) Apply(ctx context.Context, ingress *networkingv1.Ingress if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } @@ -179,11 +186,12 @@ func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *networkingv1.I if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go index 208a975082..cdbd594452 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go @@ -43,20 +43,22 @@ var ingressclassesKind = v1.SchemeGroupVersion.WithKind("IngressClass") // Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. func (c *FakeIngressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) { + emptyResult := &v1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(ingressclassesResource, name), &v1.IngressClass{}) + Invokes(testing.NewRootGetActionWithOptions(ingressclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.IngressClass), err } // List takes label and field selectors, and returns the list of IngressClasses that match those selectors. func (c *FakeIngressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) { + emptyResult := &v1.IngressClassList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(ingressclassesResource, ingressclassesKind, opts), &v1.IngressClassList{}) + Invokes(testing.NewRootListActionWithOptions(ingressclassesResource, ingressclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeIngressClasses) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested ingressClasses. func (c *FakeIngressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(ingressclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(ingressclassesResource, opts)) } // Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) { + emptyResult := &v1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(ingressclassesResource, ingressClass), &v1.IngressClass{}) + Invokes(testing.NewRootCreateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.IngressClass), err } // Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) { + emptyResult := &v1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(ingressclassesResource, ingressClass), &v1.IngressClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.IngressClass), err } @@ -107,7 +111,7 @@ func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts metav // DeleteCollection deletes a collection of objects. func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(ingressclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(ingressclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.IngressClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts metav1.D // Patch applies the patch and returns the patched ingressClass. func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) { + emptyResult := &v1.IngressClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, name, pt, data, subresources...), &v1.IngressClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.IngressClass), err } @@ -136,10 +141,11 @@ func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *networking if name == nil { return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") } + emptyResult := &v1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, *name, types.ApplyPatchType, data), &v1.IngressClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.IngressClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go index dde09774c4..9098bf42e3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -44,22 +44,24 @@ var networkpoliciesKind = v1.SchemeGroupVersion.WithKind("NetworkPolicy") // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { + emptyResult := &v1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1.NetworkPolicy{}) + Invokes(testing.NewGetActionWithOptions(networkpoliciesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.NetworkPolicy), err } // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. func (c *FakeNetworkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { + emptyResult := &v1.NetworkPolicyList{} obj, err := c.Fake. - Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1.NetworkPolicyList{}) + Invokes(testing.NewListActionWithOptions(networkpoliciesResource, networkpoliciesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeNetworkPolicies) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested networkPolicies. func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(networkpoliciesResource, c.ns, opts)) } // Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. 
func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { + emptyResult := &v1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{}) + Invokes(testing.NewCreateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.NetworkPolicy), err } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { + emptyResult := &v1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{}) + Invokes(testing.NewUpdateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.NetworkPolicy), err } @@ -114,7 +118,7 @@ func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts meta // DeleteCollection deletes a collection of objects. func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(networkpoliciesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.NetworkPolicyList{}) return err @@ -122,11 +126,12 @@ func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts metav1. // Patch applies the patch and returns the patched networkPolicy. func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { + emptyResult := &v1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.NetworkPolicy), err } @@ -144,11 +149,12 @@ func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *networki if name == nil { return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") } + emptyResult := &v1.NetworkPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data), &v1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.NetworkPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go index 9923d6cbae..afaff4912a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -43,6 +40,7 @@ type IngressesGetter interface { type IngressInterface interface { Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type IngressInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *NetworkingV1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Ingress { return &v1.Ingress{} }, + func() *v1.IngressList { return &v1.IngressList{} }), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). 
- VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. 
-func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *ingresses) Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go index 16c8e48bf0..3301e87994 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressClassesGetter has a method to return a IngressClassInterface. 
@@ -55,143 +52,18 @@ type IngressClassInterface interface { // ingressClasses implements IngressClassInterface type ingressClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration] } // newIngressClasses returns a IngressClasses func newIngressClasses(c *NetworkingV1Client) *ingressClasses { return &ingressClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration]( + "ingressclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.IngressClass { return &v1.IngressClass{} }, + func() *v1.IngressClassList { return &v1.IngressClassList{} }), } } - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *ingressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Get(). - Resource("ingressclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. -func (c *ingressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.IngressClassList{} - err = c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *ingressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Post(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Put(). - Resource("ingressclasses"). - Name(ingressClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *ingressClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("ingressclasses"). - Name(name). - Body(&opts). 
- Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ingressclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Patch(pt). - Resource("ingressclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. -func (c *ingressClasses) Apply(ctx context.Context, ingressClass *networkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - result = &v1.IngressClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("ingressclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index d7454ce145..ba2ef32dbd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. 
@@ -55,154 +52,18 @@ type NetworkPolicyInterface interface { // networkPolicies implements NetworkPolicyInterface type networkPolicies struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration] } // newNetworkPolicies returns a NetworkPolicies func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies { return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration]( + "networkpolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.NetworkPolicy { return &v1.NetworkPolicy{} }, + func() *v1.NetworkPolicyList { return &v1.NetworkPolicyList{} }), } } - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. 
Returns an error if one occurs. -func (c *networkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. -func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - result = &v1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go index 4db8df68cb..6ce62b3313 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go @@ -43,20 +43,22 @@ var ipaddressesKind = v1alpha1.SchemeGroupVersion.WithKind("IPAddress") // Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. func (c *FakeIPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) { + emptyResult := &v1alpha1.IPAddress{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(ipaddressesResource, name), &v1alpha1.IPAddress{}) + Invokes(testing.NewRootGetActionWithOptions(ipaddressesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.IPAddress), err } // List takes label and field selectors, and returns the list of IPAddresses that match those selectors. func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) { + emptyResult := &v1alpha1.IPAddressList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(ipaddressesResource, ipaddressesKind, opts), &v1alpha1.IPAddressList{}) + Invokes(testing.NewRootListActionWithOptions(ipaddressesResource, ipaddressesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested iPAddresses. func (c *FakeIPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(ipaddressesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(ipaddressesResource, opts)) } // Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. func (c *FakeIPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) { + emptyResult := &v1alpha1.IPAddress{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(ipaddressesResource, iPAddress), &v1alpha1.IPAddress{}) + Invokes(testing.NewRootCreateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.IPAddress), err } // Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. func (c *FakeIPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) { + emptyResult := &v1alpha1.IPAddress{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(ipaddressesResource, iPAddress), &v1alpha1.IPAddress{}) + Invokes(testing.NewRootUpdateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.IPAddress), err } @@ -107,7 +111,7 @@ func (c *FakeIPAddresses) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(ipaddressesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(ipaddressesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.IPAddressList{}) return err @@ -115,10 +119,11 @@ func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched iPAddress. func (c *FakeIPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) { + emptyResult := &v1alpha1.IPAddress{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(ipaddressesResource, name, pt, data, subresources...), &v1alpha1.IPAddress{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.IPAddress), err } @@ -136,10 +141,11 @@ func (c *FakeIPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alph if name == nil { return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") } + emptyResult := &v1alpha1.IPAddress{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ipaddressesResource, *name, types.ApplyPatchType, data), &v1alpha1.IPAddress{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.IPAddress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go index 653ef631af..27a78e1ba6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go @@ -43,20 +43,22 @@ var servicecidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR") // Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(servicecidrsResource, name), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootGetActionWithOptions(servicecidrsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } // List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { + emptyResult := &v1alpha1.ServiceCIDRList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(servicecidrsResource, servicecidrsKind, opts), &v1alpha1.ServiceCIDRList{}) + Invokes(testing.NewRootListActionWithOptions(servicecidrsResource, servicecidrsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested serviceCIDRs. func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(servicecidrsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(servicecidrsResource, opts)) } // Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootCreateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } // Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootUpdateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) { +func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(servicecidrsResource, "status", serviceCIDR), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(servicecidrsResource, "status", serviceCIDR, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } @@ -118,7 +123,7 @@ func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(servicecidrsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(servicecidrsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ServiceCIDRList{}) return err @@ -126,10 +131,11 @@ func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched serviceCIDR. func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, name, pt, data, subresources...), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } @@ -147,10 +153,11 @@ func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1a if name == nil { return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") } + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } @@ -169,10 +176,11 @@ func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *network if name == nil { return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") } + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go index fff193d68d..33e90d18a3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IPAddressesGetter has a method to return a IPAddressInterface. @@ -55,143 +52,18 @@ type IPAddressInterface interface { // iPAddresses implements IPAddressInterface type iPAddresses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration] } // newIPAddresses returns a IPAddresses func newIPAddresses(c *NetworkingV1alpha1Client) *iPAddresses { return &iPAddresses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration]( + "ipaddresses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.IPAddress { return &v1alpha1.IPAddress{} }, + func() *v1alpha1.IPAddressList { return &v1alpha1.IPAddressList{} }), } } - -// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. -func (c *iPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Get(). - Resource("ipaddresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. -func (c *iPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.IPAddressList{} - err = c.client.Get(). 
- Resource("ipaddresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested iPAddresses. -func (c *iPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ipaddresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *iPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Post(). - Resource("ipaddresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAddress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *iPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Put(). - Resource("ipaddresses"). - Name(iPAddress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAddress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. -func (c *iPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ipaddresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *iPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ipaddresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched iPAddress. -func (c *iPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Patch(pt). - Resource("ipaddresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. -func (c *iPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) { - if iPAddress == nil { - return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(iPAddress) - if err != nil { - return nil, err - } - name := iPAddress.Name - if name == nil { - return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") - } - result = &v1alpha1.IPAddress{} - err = c.client.Patch(types.ApplyPatchType). 
- Resource("ipaddresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go index 100f290a19..b72fe5b696 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ServiceCIDRsGetter has a method to return a ServiceCIDRInterface. @@ -43,6 +40,7 @@ type ServiceCIDRsGetter interface { type ServiceCIDRInterface interface { Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (*v1alpha1.ServiceCIDR, error) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type ServiceCIDRInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) ServiceCIDRExpansion } // serviceCIDRs implements ServiceCIDRInterface type serviceCIDRs struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration] } // newServiceCIDRs returns a ServiceCIDRs func newServiceCIDRs(c *NetworkingV1alpha1Client) *serviceCIDRs { return &serviceCIDRs{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration]( + "servicecidrs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ServiceCIDR { return &v1alpha1.ServiceCIDR{} }, + func() *v1alpha1.ServiceCIDRList { return &v1alpha1.ServiceCIDRList{} }), } } - -// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. -func (c *serviceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Get(). - Resource("servicecidrs"). - Name(name). 
- VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. -func (c *serviceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ServiceCIDRList{} - err = c.client.Get(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceCIDRs. -func (c *serviceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *serviceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Post(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *serviceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Put(). - Resource("servicecidrs"). - Name(serviceCIDR.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *serviceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Put(). - Resource("servicecidrs"). - Name(serviceCIDR.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. -func (c *serviceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("servicecidrs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("servicecidrs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched serviceCIDR. -func (c *serviceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(pt). - Resource("servicecidrs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. -func (c *serviceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(types.ApplyPatchType). - Resource("servicecidrs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *serviceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(types.ApplyPatchType). - Resource("servicecidrs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go index 7a3b861be0..59bf762a01 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go @@ -44,22 +44,24 @@ var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress") // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // List takes label and field selectors, and returns the list of Ingresses that match those selectors. 
func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + emptyResult := &v1beta1.IngressList{} obj, err := c.Fake. - Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{}) + Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result * // Watch returns a watch.Interface that watches the requested ingresses. func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts)) } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) { +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -126,7 +131,7 @@ func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteO // DeleteCollection deletes a collection of objects. 
func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) return err @@ -134,11 +139,12 @@ func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOpti // Patch applies the patch and returns the patched ingress. func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -156,11 +162,12 @@ func (c *FakeIngresses) Apply(ctx context.Context, ingress *networkingv1beta1.In if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -179,11 +186,12 @@ func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *networkingv1be if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go index 1804e61fc3..3001de8e42 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go @@ -43,20 +43,22 @@ var ingressclassesKind = v1beta1.SchemeGroupVersion.WithKind("IngressClass") // Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. func (c *FakeIngressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) { + emptyResult := &v1beta1.IngressClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(ingressclassesResource, name), &v1beta1.IngressClass{}) + Invokes(testing.NewRootGetActionWithOptions(ingressclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.IngressClass), err } // List takes label and field selectors, and returns the list of IngressClasses that match those selectors. func (c *FakeIngressClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressClassList, err error) { + emptyResult := &v1beta1.IngressClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(ingressclassesResource, ingressclassesKind, opts), &v1beta1.IngressClassList{}) + Invokes(testing.NewRootListActionWithOptions(ingressclassesResource, ingressclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeIngressClasses) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested ingressClasses. func (c *FakeIngressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(ingressclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(ingressclassesResource, opts)) } // Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (result *v1beta1.IngressClass, err error) { + emptyResult := &v1beta1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(ingressclassesResource, ingressClass), &v1beta1.IngressClass{}) + Invokes(testing.NewRootCreateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.IngressClass), err } // Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (result *v1beta1.IngressClass, err error) { + emptyResult := &v1beta1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(ingressclassesResource, ingressClass), &v1beta1.IngressClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.IngressClass), err } @@ -107,7 +111,7 @@ func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts v1.De // DeleteCollection deletes a collection of objects. func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(ingressclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(ingressclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.IngressClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts v1.Delet // Patch applies the patch and returns the patched ingressClass. 
func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) { + emptyResult := &v1beta1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, name, pt, data, subresources...), &v1beta1.IngressClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.IngressClass), err } @@ -136,10 +141,11 @@ func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *networking if name == nil { return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") } + emptyResult := &v1beta1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, *name, types.ApplyPatchType, data), &v1beta1.IngressClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.IngressClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go new file mode 100644 index 0000000000..d8352bb79f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeIPAddresses implements IPAddressInterface +type FakeIPAddresses struct { + Fake *FakeNetworkingV1beta1 +} + +var ipaddressesResource = v1beta1.SchemeGroupVersion.WithResource("ipaddresses") + +var ipaddressesKind = v1beta1.SchemeGroupVersion.WithKind("IPAddress") + +// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. +func (c *FakeIPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IPAddress, err error) { + emptyResult := &v1beta1.IPAddress{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(ipaddressesResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.IPAddress), err +} + +// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. 
+func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IPAddressList, err error) { + emptyResult := &v1beta1.IPAddressList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(ipaddressesResource, ipaddressesKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.IPAddressList{ListMeta: obj.(*v1beta1.IPAddressList).ListMeta} + for _, item := range obj.(*v1beta1.IPAddressList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested iPAddresses. +func (c *FakeIPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(ipaddressesResource, opts)) +} + +// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *FakeIPAddresses) Create(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.CreateOptions) (result *v1beta1.IPAddress, err error) { + emptyResult := &v1beta1.IPAddress{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.IPAddress), err +} + +// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *FakeIPAddresses) Update(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.UpdateOptions) (result *v1beta1.IPAddress, err error) { + emptyResult := &v1beta1.IPAddress{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.IPAddress), err +} + +// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. +func (c *FakeIPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ipaddressesResource, name, opts), &v1beta1.IPAddress{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(ipaddressesResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.IPAddressList{}) + return err +} + +// Patch applies the patch and returns the patched iPAddress. +func (c *FakeIPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IPAddress, err error) { + emptyResult := &v1beta1.IPAddress{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.IPAddress), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. 
+func (c *FakeIPAddresses) Apply(ctx context.Context, iPAddress *networkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IPAddress, err error) { + if iPAddress == nil { + return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") + } + data, err := json.Marshal(iPAddress) + if err != nil { + return nil, err + } + name := iPAddress.Name + if name == nil { + return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") + } + emptyResult := &v1beta1.IPAddress{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.IPAddress), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go index b8792a3064..bd72d59297 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go @@ -28,6 +28,10 @@ type FakeNetworkingV1beta1 struct { *testing.Fake } +func (c *FakeNetworkingV1beta1) IPAddresses() v1beta1.IPAddressInterface { + return &FakeIPAddresses{c} +} + func (c *FakeNetworkingV1beta1) Ingresses(namespace string) v1beta1.IngressInterface { return &FakeIngresses{c, namespace} } @@ -36,6 +40,10 @@ func (c *FakeNetworkingV1beta1) IngressClasses() v1beta1.IngressClassInterface { return &FakeIngressClasses{c} } +func (c *FakeNetworkingV1beta1) ServiceCIDRs() v1beta1.ServiceCIDRInterface { + return &FakeServiceCIDRs{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeNetworkingV1beta1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go new file mode 100644 index 0000000000..0eb5b2f2bb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go @@ -0,0 +1,186 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeServiceCIDRs implements ServiceCIDRInterface +type FakeServiceCIDRs struct { + Fake *FakeNetworkingV1beta1 +} + +var servicecidrsResource = v1beta1.SchemeGroupVersion.WithResource("servicecidrs") + +var servicecidrsKind = v1beta1.SchemeGroupVersion.WithKind("ServiceCIDR") + +// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. +func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ServiceCIDR, err error) { + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(servicecidrsResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. +func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ServiceCIDRList, err error) { + emptyResult := &v1beta1.ServiceCIDRList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(servicecidrsResource, servicecidrsKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ServiceCIDRList{ListMeta: obj.(*v1beta1.ServiceCIDRList).ListMeta} + for _, item := range obj.(*v1beta1.ServiceCIDRList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceCIDRs. +func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(servicecidrsResource, opts)) +} + +// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. +func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.CreateOptions) (result *v1beta1.ServiceCIDR, err error) { + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. +func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (result *v1beta1.ServiceCIDR, err error) { + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (result *v1beta1.ServiceCIDR, err error) { + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(servicecidrsResource, "status", serviceCIDR, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. +func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(servicecidrsResource, name, opts), &v1beta1.ServiceCIDR{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(servicecidrsResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.ServiceCIDRList{}) + return err +} + +// Patch applies the patch and returns the patched serviceCIDR. +func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ServiceCIDR, err error) { + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. +func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go index f74c7257ad..ac1ffbb984 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go @@ -18,6 +18,10 @@ limitations under the License. package v1beta1 +type IPAddressExpansion interface{} + type IngressExpansion interface{} type IngressClassExpansion interface{} + +type ServiceCIDRExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go index b309281afa..90be275adc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -43,6 +40,7 @@ type IngressesGetter interface { type IngressInterface interface { Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type IngressInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Ingress { return &v1beta1.Ingress{} }, + func() *v1beta1.IngressList { return &v1beta1.IngressList{} }), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *ingresses) Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go index 50ccdfdbba..c55da4168f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressClassesGetter has a method to return a IngressClassInterface. @@ -55,143 +52,18 @@ type IngressClassInterface interface { // ingressClasses implements IngressClassInterface type ingressClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration] } // newIngressClasses returns a IngressClasses func newIngressClasses(c *NetworkingV1beta1Client) *ingressClasses { return &ingressClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration]( + "ingressclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.IngressClass { return &v1beta1.IngressClass{} }, + func() *v1beta1.IngressClassList { return &v1beta1.IngressClassList{} }), } } - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *ingressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Get(). - Resource("ingressclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. -func (c *ingressClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressClassList{} - err = c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *ingressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. 
-func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Post(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Put(). - Resource("ingressclasses"). - Name(ingressClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *ingressClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ingressclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ingressclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Patch(pt). - Resource("ingressclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. -func (c *ingressClasses) Apply(ctx context.Context, ingressClass *networkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - result = &v1beta1.IngressClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("ingressclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go new file mode 100644 index 0000000000..09e4139e74 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// IPAddressesGetter has a method to return a IPAddressInterface. +// A group's client should implement this interface. +type IPAddressesGetter interface { + IPAddresses() IPAddressInterface +} + +// IPAddressInterface has methods to work with IPAddress resources. +type IPAddressInterface interface { + Create(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.CreateOptions) (*v1beta1.IPAddress, error) + Update(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.UpdateOptions) (*v1beta1.IPAddress, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IPAddressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *networkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IPAddress, err error) + IPAddressExpansion +} + +// iPAddresses implements IPAddressInterface +type iPAddresses struct { + *gentype.ClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration] +} + +// newIPAddresses returns a IPAddresses +func newIPAddresses(c *NetworkingV1beta1Client) *iPAddresses { + return &iPAddresses{ + gentype.NewClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration]( + "ipaddresses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.IPAddress { return &v1beta1.IPAddress{} }, + func() *v1beta1.IPAddressList { return &v1beta1.IPAddressList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go index 851634ed0f..d35225abd8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -28,8 +28,10 @@ import ( type NetworkingV1beta1Interface interface { RESTClient() rest.Interface + IPAddressesGetter IngressesGetter IngressClassesGetter + ServiceCIDRsGetter } // NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group. 
@@ -37,6 +39,10 @@ type NetworkingV1beta1Client struct { restClient rest.Interface } +func (c *NetworkingV1beta1Client) IPAddresses() IPAddressInterface { + return newIPAddresses(c) +} + func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface { return newIngresses(c, namespace) } @@ -45,6 +51,10 @@ func (c *NetworkingV1beta1Client) IngressClasses() IngressClassInterface { return newIngressClasses(c) } +func (c *NetworkingV1beta1Client) ServiceCIDRs() ServiceCIDRInterface { + return newServiceCIDRs(c) +} + // NewForConfig creates a new NetworkingV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go new file mode 100644 index 0000000000..d3336f2ec0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ServiceCIDRsGetter has a method to return a ServiceCIDRInterface. +// A group's client should implement this interface. +type ServiceCIDRsGetter interface { + ServiceCIDRs() ServiceCIDRInterface +} + +// ServiceCIDRInterface has methods to work with ServiceCIDR resources. +type ServiceCIDRInterface interface { + Create(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.CreateOptions) (*v1beta1.ServiceCIDR, error) + Update(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ServiceCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ServiceCIDRList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ServiceCIDR, err error) + Apply(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) + ServiceCIDRExpansion +} + +// serviceCIDRs implements ServiceCIDRInterface +type serviceCIDRs struct { + *gentype.ClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration] +} + +// newServiceCIDRs returns a ServiceCIDRs +func newServiceCIDRs(c *NetworkingV1beta1Client) *serviceCIDRs { + return &serviceCIDRs{ + gentype.NewClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration]( + "servicecidrs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ServiceCIDR { return &v1beta1.ServiceCIDR{} }, + func() *v1beta1.ServiceCIDRList { return &v1beta1.ServiceCIDRList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go index 35cfbcae4b..0a52706284 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go @@ -43,20 +43,22 @@ var runtimeclassesKind = v1.SchemeGroupVersion.WithKind("RuntimeClass") // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { + emptyResult := &v1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1.RuntimeClass{}) + Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RuntimeClass), err } // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. func (c *FakeRuntimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { + emptyResult := &v1.RuntimeClassList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1.RuntimeClassList{}) + Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeRuntimeClasses) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested runtimeClasses. func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts)) } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { + emptyResult := &v1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1.RuntimeClass{}) + Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RuntimeClass), err } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { + emptyResult := &v1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1.RuntimeClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RuntimeClass), err } @@ -107,7 +111,7 @@ func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts metav // DeleteCollection deletes a collection of objects. func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.RuntimeClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts metav1.D // Patch applies the patch and returns the patched runtimeClass. func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { + emptyResult := &v1.RuntimeClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RuntimeClass), err } @@ -136,10 +141,11 @@ func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1.Run if name == nil { return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") } + emptyResult := &v1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RuntimeClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go index 5ec38b203e..6c8110640d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/node/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" nodev1 "k8s.io/client-go/applyconfigurations/node/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. @@ -55,143 +52,18 @@ type RuntimeClassInterface interface { // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.RuntimeClass { return &v1.RuntimeClass{} }, + func() *v1.RuntimeClassList { return &v1.RuntimeClassList{} }), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). 
- Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go index 2ff7d3f973..bcd261d003 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go @@ -43,20 +43,22 @@ var runtimeclassesKind = v1alpha1.SchemeGroupVersion.WithKind("RuntimeClass") // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { + emptyResult := &v1alpha1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1alpha1.RuntimeClass{}) + Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RuntimeClass), err } // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { + emptyResult := &v1alpha1.RuntimeClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1alpha1.RuntimeClassList{}) + Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested runtimeClasses. func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts)) } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { + emptyResult := &v1alpha1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) + Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RuntimeClass), err } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { + emptyResult := &v1alpha1.RuntimeClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RuntimeClass), err } @@ -107,7 +111,7 @@ func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.De // DeleteCollection deletes a collection of objects. func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.RuntimeClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.Delet // Patch applies the patch and returns the patched runtimeClass. func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { + emptyResult := &v1alpha1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1alpha1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RuntimeClass), err } @@ -136,10 +141,11 @@ func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1alph if name == nil { return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") } + emptyResult := &v1alpha1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RuntimeClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go index 039a7ace15..60aa4a213b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/node/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" nodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. 
@@ -55,143 +52,18 @@ type RuntimeClassInterface interface { // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.RuntimeClass { return &v1alpha1.RuntimeClass{} }, + func() *v1alpha1.RuntimeClassList { return &v1alpha1.RuntimeClassList{} }), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. 
-func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1alpha1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go index e6552f9aca..a3c8c018c5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go @@ -43,20 +43,22 @@ var runtimeclassesKind = v1beta1.SchemeGroupVersion.WithKind("RuntimeClass") // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { + emptyResult := &v1beta1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1beta1.RuntimeClass{}) + Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RuntimeClass), err } // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { + emptyResult := &v1beta1.RuntimeClassList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1beta1.RuntimeClassList{}) + Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested runtimeClasses. func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts)) } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { + emptyResult := &v1beta1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) + Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RuntimeClass), err } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { + emptyResult := &v1beta1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RuntimeClass), err } @@ -107,7 +111,7 @@ func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.De // DeleteCollection deletes a collection of objects. func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.RuntimeClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.Delet // Patch applies the patch and returns the patched runtimeClass. func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { + emptyResult := &v1beta1.RuntimeClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1beta1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RuntimeClass), err } @@ -136,10 +141,11 @@ func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1beta if name == nil { return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") } + emptyResult := &v1beta1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1beta1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RuntimeClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go index f8990adf1e..8e15d52889 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/node/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" nodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. @@ -55,143 +52,18 @@ type RuntimeClassInterface interface { // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.RuntimeClass { return &v1beta1.RuntimeClass{} }, + func() *v1beta1.RuntimeClassList { return &v1beta1.RuntimeClassList{} }), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RuntimeClassList{} - err = c.client.Get(). 
- Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. 
-func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1beta1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go index cd1aac9c29..22173d36de 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go @@ -19,7 +19,9 @@ limitations under the License. package v1 import ( - rest "k8s.io/client-go/rest" + v1 "k8s.io/api/policy/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" ) // EvictionsGetter has a method to return a EvictionInterface. @@ -35,14 +37,17 @@ type EvictionInterface interface { // evictions implements EvictionInterface type evictions struct { - client rest.Interface - ns string + *gentype.Client[*v1.Eviction] } // newEvictions returns a Evictions func newEvictions(c *PolicyV1Client, namespace string) *evictions { return &evictions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1.Eviction]( + "evictions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Eviction { return &v1.Eviction{} }), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go index 853187feb5..2c7e95b720 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go @@ -28,7 +28,7 @@ type EvictionExpansion interface { } func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error { - return c.client.Post(). + return c.GetClient().Post(). AbsPath("/api/v1"). Namespace(eviction.Namespace). Resource("pods"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go index 7b5f51caf4..de2bcc1b09 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go @@ -44,22 +44,24 @@ var poddisruptionbudgetsKind = v1.SchemeGroupVersion.WithKind("PodDisruptionBudg // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodDisruptionBudget, err error) { + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1.PodDisruptionBudget{}) + Invokes(testing.NewGetActionWithOptions(poddisruptionbudgetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodDisruptionBudgetList, err error) { + emptyResult := &v1.PodDisruptionBudgetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1.PodDisruptionBudgetList{}) + Invokes(testing.NewListActionWithOptions(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts metav1.ListOpt // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(poddisruptionbudgetsResource, c.ns, opts)) } // Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (result *v1.PodDisruptionBudget, err error) { + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) + Invokes(testing.NewCreateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) + Invokes(testing.NewUpdateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) { +func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } @@ -126,7 +131,7 @@ func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(poddisruptionbudgetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PodDisruptionBudgetList{}) return err @@ -134,11 +139,12 @@ func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts me // Patch applies the patch and returns the patched podDisruptionBudget. func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) { + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } @@ -156,11 +162,12 @@ func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudge if name == nil { return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") } + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } @@ -179,11 +186,12 @@ func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptio if name == nil { return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") } + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go index 58db3acf9e..6d011cbce2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" policyv1 "k8s.io/client-go/applyconfigurations/policy/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. @@ -43,6 +40,7 @@ type PodDisruptionBudgetsGetter interface { type PodDisruptionBudgetInterface interface { Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (*v1.PodDisruptionBudget, error) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type PodDisruptionBudgetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } // podDisruptionBudgets implements PodDisruptionBudgetInterface type podDisruptionBudgets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration] } // newPodDisruptionBudgets returns a PodDisruptionBudgets func newPodDisruptionBudgets(c *PolicyV1Client, namespace string) *podDisruptionBudgets { return &podDisruptionBudgets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration]( + "poddisruptionbudgets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.PodDisruptionBudget { return &v1.PodDisruptionBudget{} }, + func() *v1.PodDisruptionBudgetList { return &v1.PodDisruptionBudgetList{} }), } } - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodDisruptionBudgetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Post(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. 
-func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. -func (c *podDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go index 12e8e76edc..e003ece6bd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go @@ -19,7 +19,9 @@ limitations under the License. package v1beta1 import ( - rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/api/policy/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" ) // EvictionsGetter has a method to return a EvictionInterface. @@ -35,14 +37,17 @@ type EvictionInterface interface { // evictions implements EvictionInterface type evictions struct { - client rest.Interface - ns string + *gentype.Client[*v1beta1.Eviction] } // newEvictions returns a Evictions func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions { return &evictions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1beta1.Eviction]( + "evictions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Eviction { return &v1beta1.Eviction{} }), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go index c003671f5d..d7c28987cf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go @@ -28,7 +28,7 @@ type EvictionExpansion interface { } func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error { - return c.client.Post(). + return c.GetClient().Post(). AbsPath("/api/v1"). Namespace(eviction.Namespace). Resource("pods"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go index bcee8e7774..fbd9d01e07 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -44,22 +44,24 @@ var poddisruptionbudgetsKind = v1beta1.SchemeGroupVersion.WithKind("PodDisruptio // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. 
func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewGetActionWithOptions(poddisruptionbudgetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { + emptyResult := &v1beta1.PodDisruptionBudgetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1beta1.PodDisruptionBudgetList{}) + Invokes(testing.NewListActionWithOptions(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(poddisruptionbudgetsResource, c.ns, opts)) } // Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewCreateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewUpdateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) { +func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } @@ -126,7 +131,7 @@ func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(poddisruptionbudgetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.PodDisruptionBudgetList{}) return err @@ -134,11 +139,12 @@ func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1 // Patch applies the patch and returns the patched podDisruptionBudget. func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } @@ -156,11 +162,12 @@ func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudge if name == nil { return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") } + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } @@ -179,11 +186,12 @@ func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptio if name == nil { return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") } + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index 1687289921..4111812376 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. @@ -43,6 +40,7 @@ type PodDisruptionBudgetsGetter interface { type PodDisruptionBudgetInterface interface { Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*v1beta1.PodDisruptionBudget, error) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type PodDisruptionBudgetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } // podDisruptionBudgets implements PodDisruptionBudgetInterface type podDisruptionBudgets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration] } // newPodDisruptionBudgets returns a PodDisruptionBudgets func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets { return &podDisruptionBudgets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration]( + "poddisruptionbudgets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.PodDisruptionBudget { return &v1beta1.PodDisruptionBudget{} }, + func() *v1beta1.PodDisruptionBudgetList { return &v1beta1.PodDisruptionBudgetList{} }), } } - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Post(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. 
-func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. -func (c *podDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index 000d737f0f..19fff0ee47 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. @@ -55,143 +52,18 @@ type ClusterRoleInterface interface { // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ClusterRole { return &v1.ClusterRole{} }, + func() *v1.ClusterRoleList { return &v1.ClusterRoleList{} }), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. 
-func (c *clusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index 31db43d984..77fb3785e4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. @@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface { // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ClusterRoleBinding { return &v1.ClusterRoleBinding{} }, + func() *v1.ClusterRoleBindingList { return &v1.ClusterRoleBindingList{} }), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. 
-func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go index 5add33ddfb..6df91b1a86 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go @@ -43,20 +43,22 @@ var clusterrolesKind = v1.SchemeGroupVersion.WithKind("ClusterRole") // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. func (c *FakeClusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { + emptyResult := &v1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1.ClusterRole{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRole), err } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *FakeClusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { + emptyResult := &v1.ClusterRoleList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1.ClusterRoleList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoles) List(ctx context.Context, opts metav1.ListOptions) (r // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *FakeClusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts)) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) { + emptyResult := &v1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1.ClusterRole{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRole), err } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) { + emptyResult := &v1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1.ClusterRole{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRole), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts metav1. 
// DeleteCollection deletes a collection of objects. func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ClusterRoleList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts metav1.Del // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) { + emptyResult := &v1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRole), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1.Cluste if name == nil { return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") } + emptyResult := &v1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRole), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go index d42e93e653..6f3251408c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go @@ -43,20 +43,22 @@ var clusterrolebindingsKind = v1.SchemeGroupVersion.WithKind("ClusterRoleBinding // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { + emptyResult := &v1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1.ClusterRoleBinding{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRoleBinding), err } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *FakeClusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { + emptyResult := &v1.ClusterRoleBindingList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1.ClusterRoleBindingList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoleBindings) List(ctx context.Context, opts metav1.ListOpti // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts)) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) { + emptyResult := &v1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1.ClusterRoleBinding{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRoleBinding), err } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) { + emptyResult := &v1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1.ClusterRoleBinding{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRoleBinding), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ClusterRoleBindingList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts met // Patch applies the patch and returns the patched clusterRoleBinding. func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) { + emptyResult := &v1.ClusterRoleBinding{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRoleBinding), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding if name == nil { return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") } + emptyResult := &v1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go index a3bc5da663..ba9161940b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go @@ -44,22 +44,24 @@ var rolesKind = v1.SchemeGroupVersion.WithKind("Role") // Get takes name of the role, and returns the corresponding role object, and an error if there is any. func (c *FakeRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) { + emptyResult := &v1.Role{} obj, err := c.Fake. - Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1.Role{}) + Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Role), err } // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *FakeRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) { + emptyResult := &v1.RoleList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1.RoleList{}) + Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoles) List(ctx context.Context, opts metav1.ListOptions) (result * // Watch returns a watch.Interface that watches the requested roles. func (c *FakeRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts)) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. func (c *FakeRoles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) { + emptyResult := &v1.Role{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1.Role{}) + Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Role), err } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. func (c *FakeRoles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) { + emptyResult := &v1.Role{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1.Role{}) + Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Role), err } @@ -114,7 +118,7 @@ func (c *FakeRoles) Delete(ctx context.Context, name string, opts metav1.DeleteO // DeleteCollection deletes a collection of objects. func (c *FakeRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.RoleList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOpti // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) { + emptyResult := &v1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Role), err } @@ -144,11 +149,12 @@ func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1.RoleApplyConfigurati if name == nil { return nil, fmt.Errorf("role.Name must be provided to Apply") } + emptyResult := &v1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Role), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go index eeb37e9db3..6d7d7d1933 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go @@ -44,22 +44,24 @@ var rolebindingsKind = v1.SchemeGroupVersion.WithKind("RoleBinding") // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. func (c *FakeRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { + emptyResult := &v1.RoleBinding{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1.RoleBinding{}) + Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RoleBinding), err } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *FakeRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { + emptyResult := &v1.RoleBindingList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1.RoleBindingList{}) + Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (r // Watch returns a watch.Interface that watches the requested roleBindings. func (c *FakeRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts)) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) { + emptyResult := &v1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1.RoleBinding{}) + Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RoleBinding), err } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) { + emptyResult := &v1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1.RoleBinding{}) + Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RoleBinding), err } @@ -114,7 +118,7 @@ func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts metav1. // DeleteCollection deletes a collection of objects. func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.RoleBindingList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts metav1.Del // Patch applies the patch and returns the patched roleBinding. 
func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) { + emptyResult := &v1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RoleBinding), err } @@ -144,11 +149,12 @@ func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1.RoleBi if name == nil { return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") } + emptyResult := &v1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index 93810a3ffa..b75b055f07 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. @@ -55,154 +52,18 @@ type RoleInterface interface { // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Role { return &v1.Role{} }, + func() *v1.RoleList { return &v1.RoleList{} }), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *roles) Apply(ctx context.Context, role *rbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index 2ace938604..fcbb1c0e26 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. @@ -55,154 +52,18 @@ type RoleBindingInterface interface { // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.RoleBinding { return &v1.RoleBinding{} }, + func() *v1.RoleBindingList { return &v1.RoleBindingList{} }), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index d6d30e99ef..f91e2c50a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. @@ -55,143 +52,18 @@ type ClusterRoleInterface interface { // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ClusterRole { return &v1alpha1.ClusterRole{} }, + func() *v1alpha1.ClusterRoleList { return &v1alpha1.ClusterRoleList{} }), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1alpha1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index 2eded92ac2..3f04526f0b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. @@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface { // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ClusterRoleBinding { return &v1alpha1.ClusterRoleBinding{} }, + func() *v1alpha1.ClusterRoleBindingList { return &v1alpha1.ClusterRoleBindingList{} }), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. 
Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go index 534a1990f5..34c9a853ea 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -43,20 +43,22 @@ var clusterrolesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRole") // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { + emptyResult := &v1alpha1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRole), err } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + emptyResult := &v1alpha1.ClusterRoleList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1alpha1.ClusterRoleList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts)) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) { + emptyResult := &v1alpha1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRole), err } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) { + emptyResult := &v1alpha1.ClusterRole{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRole), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) { + emptyResult := &v1alpha1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRole), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1alpha1. if name == nil { return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") } + emptyResult := &v1alpha1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRole), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go index 0a4359392d..d42f763421 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -43,20 +43,22 @@ var clusterrolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRoleB // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + emptyResult := &v1alpha1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRoleBinding), err } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. 
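// (Illustrative sketch only, not part of the generated file: assuming a fake
// clientset "cs" built with fake.NewSimpleClientset, a context "ctx", and the
// usual metav1 alias for apimachinery's meta/v1, a label-selector list against
// the fake looks like
//
//	list, err := cs.RbacV1alpha1().ClusterRoleBindings().List(ctx,
//		metav1.ListOptions{LabelSelector: "app=demo"})
//
// and with the options-aware action constructors used below, the ListOptions
// are also carried on the action recorded by c.Fake.)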
func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + emptyResult := &v1alpha1.ClusterRoleBindingList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1alpha1.ClusterRoleBindingList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts)) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + emptyResult := &v1alpha1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRoleBinding), err } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + emptyResult := &v1alpha1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRoleBinding), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleBindingList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1. // Patch applies the patch and returns the patched clusterRoleBinding. 
func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { + emptyResult := &v1alpha1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRoleBinding), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding if name == nil { return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") } + emptyResult := &v1alpha1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go index a0e28348ae..9b0ba7cac6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -44,22 +44,24 @@ var rolesKind = v1alpha1.SchemeGroupVersion.WithKind("Role") // Get takes name of the role, and returns the corresponding role object, and an error if there is any. func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { + emptyResult := &v1alpha1.Role{} obj, err := c.Fake. - Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1alpha1.Role{}) + Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Role), err } // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { + emptyResult := &v1alpha1.RoleList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1alpha1.RoleList{}) + Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1al // Watch returns a watch.Interface that watches the requested roles. func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts)) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. 
func (c *FakeRoles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) { + emptyResult := &v1alpha1.Role{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) + Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Role), err } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. func (c *FakeRoles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) { + emptyResult := &v1alpha1.Role{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) + Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Role), err } @@ -114,7 +118,7 @@ func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.RoleList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) { + emptyResult := &v1alpha1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Role), err } @@ -144,11 +149,12 @@ func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfi if name == nil { return nil, fmt.Errorf("role.Name must be provided to Apply") } + emptyResult := &v1alpha1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Role), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go index 76649f5c2b..f572945aca 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -44,22 +44,24 @@ var rolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("RoleBinding") // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. 
func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { + emptyResult := &v1alpha1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) + Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RoleBinding), err } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + emptyResult := &v1alpha1.RoleBindingList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1alpha1.RoleBindingList{}) + Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested roleBindings. func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts)) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) { + emptyResult := &v1alpha1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) + Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RoleBinding), err } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) { + emptyResult := &v1alpha1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) + Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RoleBinding), err } @@ -114,7 +118,7 @@ func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. 
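// (Illustrative sketch only, not part of the generated file: with the
// options-aware actions used here, a test can exercise the fake roughly like
//
//	cs := fake.NewSimpleClientset()
//	err := cs.RbacV1alpha1().RoleBindings("demo").DeleteCollection(ctx,
//		metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "app=demo"})
//
// and the recorded DeleteCollection action is built from both the DeleteOptions
// and the ListOptions; "cs", "ctx", and the import aliases are assumed names.)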
func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.RoleBindingList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) { + emptyResult := &v1alpha1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1alpha1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RoleBinding), err } @@ -144,11 +149,12 @@ func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1alpha1. if name == nil { return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") } + emptyResult := &v1alpha1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index 43c16fde74..4a1876a7d5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. @@ -55,154 +52,18 @@ type RoleInterface interface { // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1alpha1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.Role { return &v1alpha1.Role{} }, + func() *v1alpha1.RoleList { return &v1alpha1.RoleList{} }), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. 
-func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *roles) Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1alpha1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index 3129c9b4e8..6473132f1c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. @@ -55,154 +52,18 @@ type RoleBindingInterface interface { // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.RoleBinding { return &v1alpha1.RoleBinding{} }, + func() *v1alpha1.RoleBindingList { return &v1alpha1.RoleBindingList{} }), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. 
-func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1alpha1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index a3d67f0315..ed398333a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. @@ -55,143 +52,18 @@ type ClusterRoleInterface interface { // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1beta1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ClusterRole { return &v1beta1.ClusterRole{} }, + func() *v1beta1.ClusterRoleList { return &v1beta1.ClusterRoleList{} }), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. 
-func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1beta1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index ae39cbb9ae..3010a99ae2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. @@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface { // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ClusterRoleBinding { return &v1beta1.ClusterRoleBinding{} }, + func() *v1beta1.ClusterRoleBindingList { return &v1beta1.ClusterRoleBindingList{} }), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. 
-func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go index 2a94a4315e..b7996c106f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go @@ -43,20 +43,22 @@ var clusterrolesKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRole") // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { + emptyResult := &v1beta1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRole), err } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { + emptyResult := &v1beta1.ClusterRoleList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1beta1.ClusterRoleList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts)) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) { + emptyResult := &v1beta1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRole), err } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) { + emptyResult := &v1beta1.ClusterRole{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRole), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) { + emptyResult := &v1beta1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRole), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1beta1.C if name == nil { return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") } + emptyResult := &v1beta1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRole), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go index c9fd7c0cdd..8843757ace 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go @@ -43,20 +43,22 @@ var clusterrolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRoleBi // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { + emptyResult := &v1beta1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRoleBinding), err } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. 
func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { + emptyResult := &v1beta1.ClusterRoleBindingList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1beta1.ClusterRoleBindingList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts)) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) { + emptyResult := &v1beta1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRoleBinding), err } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) { + emptyResult := &v1beta1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRoleBinding), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleBindingList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1. // Patch applies the patch and returns the patched clusterRoleBinding. 
func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { + emptyResult := &v1beta1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRoleBinding), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding if name == nil { return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") } + emptyResult := &v1beta1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go index 4158cf1d55..aa0fe28a13 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -44,22 +44,24 @@ var rolesKind = v1beta1.SchemeGroupVersion.WithKind("Role") // Get takes name of the role, and returns the corresponding role object, and an error if there is any. func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) { + emptyResult := &v1beta1.Role{} obj, err := c.Fake. - Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1beta1.Role{}) + Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Role), err } // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) { + emptyResult := &v1beta1.RoleList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1beta1.RoleList{}) + Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1be // Watch returns a watch.Interface that watches the requested roles. func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts)) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. 
func (c *FakeRoles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) { + emptyResult := &v1beta1.Role{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1beta1.Role{}) + Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Role), err } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. func (c *FakeRoles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) { + emptyResult := &v1beta1.Role{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1beta1.Role{}) + Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Role), err } @@ -114,7 +118,7 @@ func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.RoleList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) { + emptyResult := &v1beta1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1beta1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Role), err } @@ -144,11 +149,12 @@ func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfig if name == nil { return nil, fmt.Errorf("role.Name must be provided to Apply") } + emptyResult := &v1beta1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Role), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go index 4616f0fd10..26c3c80458 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -44,22 +44,24 @@ var rolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("RoleBinding") // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. 
func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { + emptyResult := &v1beta1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{}) + Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RoleBinding), err } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { + emptyResult := &v1beta1.RoleBindingList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1beta1.RoleBindingList{}) + Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested roleBindings. func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts)) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) { + emptyResult := &v1beta1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) + Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RoleBinding), err } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) { + emptyResult := &v1beta1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) + Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RoleBinding), err } @@ -114,7 +118,7 @@ func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. 
func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.RoleBindingList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) { + emptyResult := &v1beta1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1beta1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RoleBinding), err } @@ -144,11 +149,12 @@ func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1beta1.R if name == nil { return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") } + emptyResult := &v1beta1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index e789e42fe7..92e51da1b1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. @@ -55,154 +52,18 @@ type RoleInterface interface { // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1beta1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Role { return &v1beta1.Role{} }, + func() *v1beta1.RoleList { return &v1beta1.RoleList{} }), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Get(). 
- Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. 
-func (c *roles) Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1beta1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index 1461ba3b6e..ad31bd051a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. @@ -55,154 +52,18 @@ type RoleBindingInterface interface { // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.RoleBinding { return &v1beta1.RoleBinding{} }, + func() *v1beta1.RoleBindingList { return &v1beta1.RoleBindingList{} }), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. 
-func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1beta1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). 
- Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimparameters.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimparameters.go deleted file mode 100644 index da32b5caec..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimparameters.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - testing "k8s.io/client-go/testing" -) - -// FakeResourceClaimParameters implements ResourceClaimParametersInterface -type FakeResourceClaimParameters struct { - Fake *FakeResourceV1alpha2 - ns string -} - -var resourceclaimparametersResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaimparameters") - -var resourceclaimparametersKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimParameters") - -// Get takes name of the resourceClaimParameters, and returns the corresponding resourceClaimParameters object, and an error if there is any. -func (c *FakeResourceClaimParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(resourceclaimparametersResource, c.ns, name), &v1alpha2.ResourceClaimParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimParameters), err -} - -// List takes label and field selectors, and returns the list of ResourceClaimParameters that match those selectors. -func (c *FakeResourceClaimParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimParametersList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(resourceclaimparametersResource, resourceclaimparametersKind, c.ns, opts), &v1alpha2.ResourceClaimParametersList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha2.ResourceClaimParametersList{ListMeta: obj.(*v1alpha2.ResourceClaimParametersList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClaimParametersList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceClaimParameters. 
-func (c *FakeResourceClaimParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(resourceclaimparametersResource, c.ns, opts)) - -} - -// Create takes the representation of a resourceClaimParameters and creates it. Returns the server's representation of the resourceClaimParameters, and an error, if there is any. -func (c *FakeResourceClaimParameters) Create(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourceclaimparametersResource, c.ns, resourceClaimParameters), &v1alpha2.ResourceClaimParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimParameters), err -} - -// Update takes the representation of a resourceClaimParameters and updates it. Returns the server's representation of the resourceClaimParameters, and an error, if there is any. -func (c *FakeResourceClaimParameters) Update(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourceclaimparametersResource, c.ns, resourceClaimParameters), &v1alpha2.ResourceClaimParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimParameters), err -} - -// Delete takes name of the resourceClaimParameters and deletes it. Returns an error if one occurs. -func (c *FakeResourceClaimParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimparametersResource, c.ns, name, opts), &v1alpha2.ResourceClaimParameters{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceClaimParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourceclaimparametersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimParametersList{}) - return err -} - -// Patch applies the patch and returns the patched resourceClaimParameters. -func (c *FakeResourceClaimParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimparametersResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaimParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimParameters), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimParameters. 
-func (c *FakeResourceClaimParameters) Apply(ctx context.Context, resourceClaimParameters *resourcev1alpha2.ResourceClaimParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - if resourceClaimParameters == nil { - return nil, fmt.Errorf("resourceClaimParameters provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClaimParameters) - if err != nil { - return nil, err - } - name := resourceClaimParameters.Name - if name == nil { - return nil, fmt.Errorf("resourceClaimParameters.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimparametersResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaimParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimParameters), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go deleted file mode 100644 index 4d247c5136..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - testing "k8s.io/client-go/testing" -) - -// FakeResourceClasses implements ResourceClassInterface -type FakeResourceClasses struct { - Fake *FakeResourceV1alpha2 -} - -var resourceclassesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclasses") - -var resourceclassesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClass") - -// Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. -func (c *FakeResourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(resourceclassesResource, name), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. -func (c *FakeResourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(resourceclassesResource, resourceclassesKind, opts), &v1alpha2.ResourceClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha2.ResourceClassList{ListMeta: obj.(*v1alpha2.ResourceClassList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceClasses. -func (c *FakeResourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(resourceclassesResource, opts)) -} - -// Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *FakeResourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(resourceclassesResource, resourceClass), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *FakeResourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(resourceclassesResource, resourceClass), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// Delete takes name of the resourceClass and deletes it. Returns an error if one occurs. -func (c *FakeResourceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(resourceclassesResource, name, opts), &v1alpha2.ResourceClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(resourceclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClassList{}) - return err -} - -// Patch applies the patch and returns the patched resourceClass. -func (c *FakeResourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, name, pt, data, subresources...), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. 
-func (c *FakeResourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { - if resourceClass == nil { - return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClass) - if err != nil { - return nil, err - } - name := resourceClass.Name - if name == nil { - return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclassparameters.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclassparameters.go deleted file mode 100644 index c11762963f..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclassparameters.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - testing "k8s.io/client-go/testing" -) - -// FakeResourceClassParameters implements ResourceClassParametersInterface -type FakeResourceClassParameters struct { - Fake *FakeResourceV1alpha2 - ns string -} - -var resourceclassparametersResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclassparameters") - -var resourceclassparametersKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClassParameters") - -// Get takes name of the resourceClassParameters, and returns the corresponding resourceClassParameters object, and an error if there is any. -func (c *FakeResourceClassParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClassParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(resourceclassparametersResource, c.ns, name), &v1alpha2.ResourceClassParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClassParameters), err -} - -// List takes label and field selectors, and returns the list of ResourceClassParameters that match those selectors. -func (c *FakeResourceClassParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassParametersList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(resourceclassparametersResource, resourceclassparametersKind, c.ns, opts), &v1alpha2.ResourceClassParametersList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha2.ResourceClassParametersList{ListMeta: obj.(*v1alpha2.ResourceClassParametersList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClassParametersList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceClassParameters. -func (c *FakeResourceClassParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(resourceclassparametersResource, c.ns, opts)) - -} - -// Create takes the representation of a resourceClassParameters and creates it. Returns the server's representation of the resourceClassParameters, and an error, if there is any. -func (c *FakeResourceClassParameters) Create(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClassParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourceclassparametersResource, c.ns, resourceClassParameters), &v1alpha2.ResourceClassParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClassParameters), err -} - -// Update takes the representation of a resourceClassParameters and updates it. Returns the server's representation of the resourceClassParameters, and an error, if there is any. -func (c *FakeResourceClassParameters) Update(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClassParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourceclassparametersResource, c.ns, resourceClassParameters), &v1alpha2.ResourceClassParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClassParameters), err -} - -// Delete takes name of the resourceClassParameters and deletes it. Returns an error if one occurs. -func (c *FakeResourceClassParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclassparametersResource, c.ns, name, opts), &v1alpha2.ResourceClassParameters{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceClassParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourceclassparametersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClassParametersList{}) - return err -} - -// Patch applies the patch and returns the patched resourceClassParameters. -func (c *FakeResourceClassParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClassParameters, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resourceclassparametersResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClassParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClassParameters), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClassParameters. -func (c *FakeResourceClassParameters) Apply(ctx context.Context, resourceClassParameters *resourcev1alpha2.ResourceClassParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClassParameters, err error) { - if resourceClassParameters == nil { - return nil, fmt.Errorf("resourceClassParameters provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClassParameters) - if err != nil { - return nil, err - } - name := resourceClassParameters.Name - if name == nil { - return nil, fmt.Errorf("resourceClassParameters.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclassparametersResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClassParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClassParameters), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go deleted file mode 100644 index 72e81a29e3..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. -// A group's client should implement this interface. -type PodSchedulingContextsGetter interface { - PodSchedulingContexts(namespace string) PodSchedulingContextInterface -} - -// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. 
-type PodSchedulingContextInterface interface { - Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha2.PodSchedulingContext, error) - Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) - UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.PodSchedulingContext, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.PodSchedulingContextList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) - Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) - ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) - PodSchedulingContextExpansion -} - -// podSchedulingContexts implements PodSchedulingContextInterface -type podSchedulingContexts struct { - client rest.Interface - ns string -} - -// newPodSchedulingContexts returns a PodSchedulingContexts -func newPodSchedulingContexts(c *ResourceV1alpha2Client, namespace string) *podSchedulingContexts { - return &podSchedulingContexts{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. -func (c *podSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. -func (c *podSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.PodSchedulingContextList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSchedulingContexts. -func (c *podSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch(ctx) -} - -// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *podSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSchedulingContext). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *podSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(podSchedulingContext.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSchedulingContext). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(podSchedulingContext.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSchedulingContext). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. -func (c *podSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSchedulingContext. -func (c *podSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. 
-func (c *podSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go deleted file mode 100644 index cfb27c9db6..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClaimsGetter has a method to return a ResourceClaimInterface. -// A group's client should implement this interface. 
-type ResourceClaimsGetter interface { - ResourceClaims(namespace string) ResourceClaimInterface -} - -// ResourceClaimInterface has methods to work with ResourceClaim resources. -type ResourceClaimInterface interface { - Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (*v1alpha2.ResourceClaim, error) - Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) - UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaim, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) - Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) - ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) - ResourceClaimExpansion -} - -// resourceClaims implements ResourceClaimInterface -type resourceClaims struct { - client rest.Interface - ns string -} - -// newResourceClaims returns a ResourceClaims -func newResourceClaims(c *ResourceV1alpha2Client, namespace string) *resourceClaims { - return &resourceClaims{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClaims. -func (c *resourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourceclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceClaim and creates it. 
Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourceclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaim). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(resourceClaim.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaim). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(resourceClaim.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaim). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs. -func (c *resourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaims"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClaim. -func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourceclaims"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. 
-func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { - if resourceClaim == nil { - return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClaim) - if err != nil { - return nil, err - } - name := resourceClaim.Name - if name == nil { - return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclaims"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { - if resourceClaim == nil { - return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClaim) - if err != nil { - return nil, err - } - - name := resourceClaim.Name - if name == nil { - return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") - } - - result = &v1alpha2.ResourceClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclaims"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimparameters.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimparameters.go deleted file mode 100644 index d08afcb611..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimparameters.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClaimParametersGetter has a method to return a ResourceClaimParametersInterface. -// A group's client should implement this interface. 
-type ResourceClaimParametersGetter interface { - ResourceClaimParameters(namespace string) ResourceClaimParametersInterface -} - -// ResourceClaimParametersInterface has methods to work with ResourceClaimParameters resources. -type ResourceClaimParametersInterface interface { - Create(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.CreateOptions) (*v1alpha2.ResourceClaimParameters, error) - Update(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimParameters, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimParameters, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimParametersList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimParameters, err error) - Apply(ctx context.Context, resourceClaimParameters *resourcev1alpha2.ResourceClaimParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimParameters, err error) - ResourceClaimParametersExpansion -} - -// resourceClaimParameters implements ResourceClaimParametersInterface -type resourceClaimParameters struct { - client rest.Interface - ns string -} - -// newResourceClaimParameters returns a ResourceClaimParameters -func newResourceClaimParameters(c *ResourceV1alpha2Client, namespace string) *resourceClaimParameters { - return &resourceClaimParameters{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceClaimParameters, and returns the corresponding resourceClaimParameters object, and an error if there is any. -func (c *resourceClaimParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - result = &v1alpha2.ResourceClaimParameters{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClaimParameters that match those selectors. -func (c *resourceClaimParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimParametersList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClaimParametersList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClaimParameters. -func (c *resourceClaimParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch(ctx) -} - -// Create takes the representation of a resourceClaimParameters and creates it. Returns the server's representation of the resourceClaimParameters, and an error, if there is any. -func (c *resourceClaimParameters) Create(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - result = &v1alpha2.ResourceClaimParameters{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaimParameters). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClaimParameters and updates it. Returns the server's representation of the resourceClaimParameters, and an error, if there is any. -func (c *resourceClaimParameters) Update(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - result = &v1alpha2.ResourceClaimParameters{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - Name(resourceClaimParameters.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaimParameters). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClaimParameters and deletes it. Returns an error if one occurs. -func (c *resourceClaimParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClaimParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClaimParameters. -func (c *resourceClaimParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimParameters, err error) { - result = &v1alpha2.ResourceClaimParameters{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourceclaimparameters"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimParameters. 
-func (c *resourceClaimParameters) Apply(ctx context.Context, resourceClaimParameters *resourcev1alpha2.ResourceClaimParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - if resourceClaimParameters == nil { - return nil, fmt.Errorf("resourceClaimParameters provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClaimParameters) - if err != nil { - return nil, err - } - name := resourceClaimParameters.Name - if name == nil { - return nil, fmt.Errorf("resourceClaimParameters.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClaimParameters{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclaimparameters"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go deleted file mode 100644 index 3f4e320064..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface. -// A group's client should implement this interface. -type ResourceClaimTemplatesGetter interface { - ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface -} - -// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. 
-type ResourceClaimTemplateInterface interface { - Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha2.ResourceClaimTemplate, error) - Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimTemplate, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimTemplate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimTemplateList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) - Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) - ResourceClaimTemplateExpansion -} - -// resourceClaimTemplates implements ResourceClaimTemplateInterface -type resourceClaimTemplates struct { - client rest.Interface - ns string -} - -// newResourceClaimTemplates returns a ResourceClaimTemplates -func newResourceClaimTemplates(c *ResourceV1alpha2Client, namespace string) *resourceClaimTemplates { - return &resourceClaimTemplates{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. -func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. -func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClaimTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClaimTemplates. -func (c *resourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. 
-func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaimTemplate). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(resourceClaimTemplate.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaimTemplate). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs. -func (c *resourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClaimTemplate. -func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. -func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - if resourceClaimTemplate == nil { - return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClaimTemplate) - if err != nil { - return nil, err - } - name := resourceClaimTemplate.Name - if name == nil { - return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go deleted file mode 100644 index 95a4ac5668..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClassesGetter has a method to return a ResourceClassInterface. -// A group's client should implement this interface. -type ResourceClassesGetter interface { - ResourceClasses() ResourceClassInterface -} - -// ResourceClassInterface has methods to work with ResourceClass resources. -type ResourceClassInterface interface { - Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (*v1alpha2.ResourceClass, error) - Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (*v1alpha2.ResourceClass, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) - Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) - ResourceClassExpansion -} - -// resourceClasses implements ResourceClassInterface -type resourceClasses struct { - client rest.Interface -} - -// newResourceClasses returns a ResourceClasses -func newResourceClasses(c *ResourceV1alpha2Client) *resourceClasses { - return &resourceClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. -func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Get(). - Resource("resourceclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. -func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClassList{} - err = c.client.Get(). - Resource("resourceclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClasses. -func (c *resourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("resourceclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Post(). - Resource("resourceclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Put(). - Resource("resourceclasses"). - Name(resourceClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClass and deletes it. Returns an error if one occurs. -func (c *resourceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("resourceclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("resourceclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClass. -func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Patch(pt). - Resource("resourceclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. 
-func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { - if resourceClass == nil { - return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClass) - if err != nil { - return nil, err - } - name := resourceClass.Name - if name == nil { - return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("resourceclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclassparameters.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclassparameters.go deleted file mode 100644 index 8ac9be0784..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclassparameters.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClassParametersGetter has a method to return a ResourceClassParametersInterface. -// A group's client should implement this interface. -type ResourceClassParametersGetter interface { - ResourceClassParameters(namespace string) ResourceClassParametersInterface -} - -// ResourceClassParametersInterface has methods to work with ResourceClassParameters resources. 
-type ResourceClassParametersInterface interface { - Create(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.CreateOptions) (*v1alpha2.ResourceClassParameters, error) - Update(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.UpdateOptions) (*v1alpha2.ResourceClassParameters, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClassParameters, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassParametersList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClassParameters, err error) - Apply(ctx context.Context, resourceClassParameters *resourcev1alpha2.ResourceClassParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClassParameters, err error) - ResourceClassParametersExpansion -} - -// resourceClassParameters implements ResourceClassParametersInterface -type resourceClassParameters struct { - client rest.Interface - ns string -} - -// newResourceClassParameters returns a ResourceClassParameters -func newResourceClassParameters(c *ResourceV1alpha2Client, namespace string) *resourceClassParameters { - return &resourceClassParameters{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceClassParameters, and returns the corresponding resourceClassParameters object, and an error if there is any. -func (c *resourceClassParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClassParameters, err error) { - result = &v1alpha2.ResourceClassParameters{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclassparameters"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClassParameters that match those selectors. -func (c *resourceClassParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassParametersList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClassParametersList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclassparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClassParameters. -func (c *resourceClassParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourceclassparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceClassParameters and creates it. Returns the server's representation of the resourceClassParameters, and an error, if there is any. 
-func (c *resourceClassParameters) Create(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClassParameters, err error) { - result = &v1alpha2.ResourceClassParameters{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourceclassparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClassParameters). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClassParameters and updates it. Returns the server's representation of the resourceClassParameters, and an error, if there is any. -func (c *resourceClassParameters) Update(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClassParameters, err error) { - result = &v1alpha2.ResourceClassParameters{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclassparameters"). - Name(resourceClassParameters.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClassParameters). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClassParameters and deletes it. Returns an error if one occurs. -func (c *resourceClassParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclassparameters"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClassParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclassparameters"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClassParameters. -func (c *resourceClassParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClassParameters, err error) { - result = &v1alpha2.ResourceClassParameters{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourceclassparameters"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClassParameters. -func (c *resourceClassParameters) Apply(ctx context.Context, resourceClassParameters *resourcev1alpha2.ResourceClassParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClassParameters, err error) { - if resourceClassParameters == nil { - return nil, fmt.Errorf("resourceClassParameters provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClassParameters) - if err != nil { - return nil, err - } - name := resourceClassParameters.Name - if name == nil { - return nil, fmt.Errorf("resourceClassParameters.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClassParameters{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclassparameters"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceslice.go deleted file mode 100644 index 302f370d52..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceslice.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceSlicesGetter has a method to return a ResourceSliceInterface. -// A group's client should implement this interface. -type ResourceSlicesGetter interface { - ResourceSlices() ResourceSliceInterface -} - -// ResourceSliceInterface has methods to work with ResourceSlice resources. -type ResourceSliceInterface interface { - Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (*v1alpha2.ResourceSlice, error) - Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (*v1alpha2.ResourceSlice, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceSlice, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceSliceList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error) - Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error) - ResourceSliceExpansion -} - -// resourceSlices implements ResourceSliceInterface -type resourceSlices struct { - client rest.Interface -} - -// newResourceSlices returns a ResourceSlices -func newResourceSlices(c *ResourceV1alpha2Client) *resourceSlices { - return &resourceSlices{ - client: c.RESTClient(), - } -} - -// Get takes name of the resourceSlice, and returns the corresponding resourceSlice object, and an error if there is any. -func (c *resourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceSlice, err error) { - result = &v1alpha2.ResourceSlice{} - err = c.client.Get(). - Resource("resourceslices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceSlices that match those selectors. -func (c *resourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceSliceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceSliceList{} - err = c.client.Get(). - Resource("resourceslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceSlices. -func (c *resourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("resourceslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceSlice and creates it. Returns the server's representation of the resourceSlice, and an error, if there is any. -func (c *resourceSlices) Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (result *v1alpha2.ResourceSlice, err error) { - result = &v1alpha2.ResourceSlice{} - err = c.client.Post(). - Resource("resourceslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceSlice). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceSlice and updates it. Returns the server's representation of the resourceSlice, and an error, if there is any. -func (c *resourceSlices) Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha2.ResourceSlice, err error) { - result = &v1alpha2.ResourceSlice{} - err = c.client.Put(). - Resource("resourceslices"). - Name(resourceSlice.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceSlice). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceSlice and deletes it. Returns an error if one occurs. -func (c *resourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("resourceslices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("resourceslices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceSlice. -func (c *resourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error) { - result = &v1alpha2.ResourceSlice{} - err = c.client.Patch(pt). - Resource("resourceslices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceSlice. 
-func (c *resourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error) { - if resourceSlice == nil { - return nil, fmt.Errorf("resourceSlice provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceSlice) - if err != nil { - return nil, err - } - name := resourceSlice.Name - if name == nil { - return nil, fmt.Errorf("resourceSlice.Name must be provided to Apply") - } - result = &v1alpha2.ResourceSlice{} - err = c.client.Patch(types.ApplyPatchType). - Resource("resourceslices"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go new file mode 100644 index 0000000000..35455dfa35 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// DeviceClassesGetter has a method to return a DeviceClassInterface. +// A group's client should implement this interface. +type DeviceClassesGetter interface { + DeviceClasses() DeviceClassInterface +} + +// DeviceClassInterface has methods to work with DeviceClass resources. 
+type DeviceClassInterface interface { + Create(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.CreateOptions) (*v1alpha3.DeviceClass, error) + Update(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.UpdateOptions) (*v1alpha3.DeviceClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.DeviceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.DeviceClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.DeviceClass, err error) + Apply(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.DeviceClass, err error) + DeviceClassExpansion +} + +// deviceClasses implements DeviceClassInterface +type deviceClasses struct { + *gentype.ClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration] +} + +// newDeviceClasses returns a DeviceClasses +func newDeviceClasses(c *ResourceV1alpha3Client) *deviceClasses { + return &deviceClasses{ + gentype.NewClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration]( + "deviceclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha3.DeviceClass { return &v1alpha3.DeviceClass{} }, + func() *v1alpha3.DeviceClassList { return &v1alpha3.DeviceClassList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go new file mode 100644 index 0000000000..fdb23fd37c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha3 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/doc.go new file mode 100644 index 0000000000..16f4439906 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go new file mode 100644 index 0000000000..d96cbd2219 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + testing "k8s.io/client-go/testing" +) + +// FakeDeviceClasses implements DeviceClassInterface +type FakeDeviceClasses struct { + Fake *FakeResourceV1alpha3 +} + +var deviceclassesResource = v1alpha3.SchemeGroupVersion.WithResource("deviceclasses") + +var deviceclassesKind = v1alpha3.SchemeGroupVersion.WithKind("DeviceClass") + +// Get takes name of the deviceClass, and returns the corresponding deviceClass object, and an error if there is any. +func (c *FakeDeviceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.DeviceClass, err error) { + emptyResult := &v1alpha3.DeviceClass{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(deviceclassesResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.DeviceClass), err +} + +// List takes label and field selectors, and returns the list of DeviceClasses that match those selectors. +func (c *FakeDeviceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.DeviceClassList, err error) { + emptyResult := &v1alpha3.DeviceClassList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(deviceclassesResource, deviceclassesKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.DeviceClassList{ListMeta: obj.(*v1alpha3.DeviceClassList).ListMeta} + for _, item := range obj.(*v1alpha3.DeviceClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested deviceClasses. +func (c *FakeDeviceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewRootWatchActionWithOptions(deviceclassesResource, opts)) +} + +// Create takes the representation of a deviceClass and creates it. Returns the server's representation of the deviceClass, and an error, if there is any. +func (c *FakeDeviceClasses) Create(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.CreateOptions) (result *v1alpha3.DeviceClass, err error) { + emptyResult := &v1alpha3.DeviceClass{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(deviceclassesResource, deviceClass, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.DeviceClass), err +} + +// Update takes the representation of a deviceClass and updates it. Returns the server's representation of the deviceClass, and an error, if there is any. +func (c *FakeDeviceClasses) Update(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.UpdateOptions) (result *v1alpha3.DeviceClass, err error) { + emptyResult := &v1alpha3.DeviceClass{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(deviceclassesResource, deviceClass, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.DeviceClass), err +} + +// Delete takes name of the deviceClass and deletes it. Returns an error if one occurs. +func (c *FakeDeviceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(deviceclassesResource, name, opts), &v1alpha3.DeviceClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDeviceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(deviceclassesResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha3.DeviceClassList{}) + return err +} + +// Patch applies the patch and returns the patched deviceClass. +func (c *FakeDeviceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.DeviceClass, err error) { + emptyResult := &v1alpha3.DeviceClass{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(deviceclassesResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.DeviceClass), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied deviceClass. +func (c *FakeDeviceClasses) Apply(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.DeviceClass, err error) { + if deviceClass == nil { + return nil, fmt.Errorf("deviceClass provided to Apply must not be nil") + } + data, err := json.Marshal(deviceClass) + if err != nil { + return nil, err + } + name := deviceClass.Name + if name == nil { + return nil, fmt.Errorf("deviceClass.Name must be provided to Apply") + } + emptyResult := &v1alpha3.DeviceClass{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(deviceclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.DeviceClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go similarity index 56% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go index 54882f8175..54898993e5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go @@ -23,51 +23,53 @@ import ( json "encoding/json" "fmt" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" testing "k8s.io/client-go/testing" ) // FakePodSchedulingContexts implements PodSchedulingContextInterface type FakePodSchedulingContexts struct { - Fake *FakeResourceV1alpha2 + Fake *FakeResourceV1alpha3 ns string } -var podschedulingcontextsResource = v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts") +var podschedulingcontextsResource = v1alpha3.SchemeGroupVersion.WithResource("podschedulingcontexts") -var podschedulingcontextsKind = v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContext") +var podschedulingcontextsKind = v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContext") // Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. -func (c *FakePodSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.PodSchedulingContext, err error) { + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. - Invokes(testing.NewGetAction(podschedulingcontextsResource, c.ns, name), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewGetActionWithOptions(podschedulingcontextsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. -func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) { +func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.PodSchedulingContextList, err error) { + emptyResult := &v1alpha3.PodSchedulingContextList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(podschedulingcontextsResource, podschedulingcontextsKind, c.ns, opts), &v1alpha2.PodSchedulingContextList{}) + Invokes(testing.NewListActionWithOptions(podschedulingcontextsResource, podschedulingcontextsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } - list := &v1alpha2.PodSchedulingContextList{ListMeta: obj.(*v1alpha2.PodSchedulingContextList).ListMeta} - for _, item := range obj.(*v1alpha2.PodSchedulingContextList).Items { + list := &v1alpha3.PodSchedulingContextList{ListMeta: obj.(*v1alpha3.PodSchedulingContextList).ListMeta} + for _, item := range obj.(*v1alpha3.PodSchedulingContextList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -78,73 +80,77 @@ func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOption // Watch returns a watch.Interface that watches the requested podSchedulingContexts. func (c *FakePodSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(podschedulingcontextsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(podschedulingcontextsResource, c.ns, opts)) } // Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *FakePodSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha3.PodSchedulingContext, err error) { + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(podschedulingcontextsResource, c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewCreateActionWithOptions(podschedulingcontextsResource, c.ns, podSchedulingContext, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *FakePodSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha3.PodSchedulingContext, err error) { + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podschedulingcontextsResource, c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewUpdateActionWithOptions(podschedulingcontextsResource, c.ns, podSchedulingContext, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // UpdateStatus was generated because the type contains a Status member. 
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePodSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) { +func (c *FakePodSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha3.PodSchedulingContext, err error) { + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(podschedulingcontextsResource, "status", c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(podschedulingcontextsResource, "status", c.ns, podSchedulingContext, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. func (c *FakePodSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podschedulingcontextsResource, c.ns, name, opts), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewDeleteActionWithOptions(podschedulingcontextsResource, c.ns, name, opts), &v1alpha3.PodSchedulingContext{}) return err } // DeleteCollection deletes a collection of objects. func (c *FakePodSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podschedulingcontextsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(podschedulingcontextsResource, c.ns, opts, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha2.PodSchedulingContextList{}) + _, err := c.Fake.Invokes(action, &v1alpha3.PodSchedulingContextList{}) return err } // Patch applies the patch and returns the patched podSchedulingContext. -func (c *FakePodSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.PodSchedulingContext, err error) { + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, name, pt, data, subresources...), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. 
-func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) { if podSchedulingContext == nil { return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") } @@ -156,18 +162,19 @@ func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingCont if name == nil { return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") } + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) { if podSchedulingContext == nil { return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") } @@ -179,11 +186,12 @@ func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podScheduli if name == nil { return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") } + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go similarity index 51% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go index 6f69d0fa79..4523d9f09c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go @@ -19,46 +19,38 @@ limitations under the License. 
package fake import ( - v1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) -type FakeResourceV1alpha2 struct { +type FakeResourceV1alpha3 struct { *testing.Fake } -func (c *FakeResourceV1alpha2) PodSchedulingContexts(namespace string) v1alpha2.PodSchedulingContextInterface { - return &FakePodSchedulingContexts{c, namespace} +func (c *FakeResourceV1alpha3) DeviceClasses() v1alpha3.DeviceClassInterface { + return &FakeDeviceClasses{c} } -func (c *FakeResourceV1alpha2) ResourceClaims(namespace string) v1alpha2.ResourceClaimInterface { - return &FakeResourceClaims{c, namespace} +func (c *FakeResourceV1alpha3) PodSchedulingContexts(namespace string) v1alpha3.PodSchedulingContextInterface { + return &FakePodSchedulingContexts{c, namespace} } -func (c *FakeResourceV1alpha2) ResourceClaimParameters(namespace string) v1alpha2.ResourceClaimParametersInterface { - return &FakeResourceClaimParameters{c, namespace} +func (c *FakeResourceV1alpha3) ResourceClaims(namespace string) v1alpha3.ResourceClaimInterface { + return &FakeResourceClaims{c, namespace} } -func (c *FakeResourceV1alpha2) ResourceClaimTemplates(namespace string) v1alpha2.ResourceClaimTemplateInterface { +func (c *FakeResourceV1alpha3) ResourceClaimTemplates(namespace string) v1alpha3.ResourceClaimTemplateInterface { return &FakeResourceClaimTemplates{c, namespace} } -func (c *FakeResourceV1alpha2) ResourceClasses() v1alpha2.ResourceClassInterface { - return &FakeResourceClasses{c} -} - -func (c *FakeResourceV1alpha2) ResourceClassParameters(namespace string) v1alpha2.ResourceClassParametersInterface { - return &FakeResourceClassParameters{c, namespace} -} - -func (c *FakeResourceV1alpha2) ResourceSlices() v1alpha2.ResourceSliceInterface { +func (c *FakeResourceV1alpha3) ResourceSlices() v1alpha3.ResourceSliceInterface { return &FakeResourceSlices{c} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *FakeResourceV1alpha2) RESTClient() rest.Interface { +func (c *FakeResourceV1alpha3) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go similarity index 57% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go index 087e51f714..db38b3d60c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go @@ -23,51 +23,53 @@ import ( json "encoding/json" "fmt" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" testing "k8s.io/client-go/testing" ) // FakeResourceClaims implements ResourceClaimInterface type FakeResourceClaims struct { - Fake *FakeResourceV1alpha2 + Fake *FakeResourceV1alpha3 ns string } -var resourceclaimsResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaims") +var resourceclaimsResource = v1alpha3.SchemeGroupVersion.WithResource("resourceclaims") -var resourceclaimsKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaim") +var resourceclaimsKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceClaim") // Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *FakeResourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceClaim, err error) { + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewGetAction(resourceclaimsResource, c.ns, name), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewGetActionWithOptions(resourceclaimsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { +func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceClaimList, err error) { + emptyResult := &v1alpha3.ResourceClaimList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(resourceclaimsResource, resourceclaimsKind, c.ns, opts), &v1alpha2.ResourceClaimList{}) + Invokes(testing.NewListActionWithOptions(resourceclaimsResource, resourceclaimsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } - list := &v1alpha2.ResourceClaimList{ListMeta: obj.(*v1alpha2.ResourceClaimList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClaimList).Items { + list := &v1alpha3.ResourceClaimList{ListMeta: obj.(*v1alpha3.ResourceClaimList).ListMeta} + for _, item := range obj.(*v1alpha3.ResourceClaimList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -78,73 +80,77 @@ func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested resourceClaims. func (c *FakeResourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(resourceclaimsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(resourceclaimsResource, c.ns, opts)) } // Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *FakeResourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) Create(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.CreateOptions) (result *v1alpha3.ResourceClaim, err error) { + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewCreateActionWithOptions(resourceclaimsResource, c.ns, resourceClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *FakeResourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) Update(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaim, err error) { + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewUpdateActionWithOptions(resourceclaimsResource, c.ns, resourceClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeResourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) { +func (c *FakeResourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaim, err error) { + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(resourceclaimsResource, "status", c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(resourceclaimsResource, "status", c.ns, resourceClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs. func (c *FakeResourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimsResource, c.ns, name, opts), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewDeleteActionWithOptions(resourceclaimsResource, c.ns, name, opts), &v1alpha3.ResourceClaim{}) return err } // DeleteCollection deletes a collection of objects. func (c *FakeResourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourceclaimsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(resourceclaimsResource, c.ns, opts, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimList{}) + _, err := c.Fake.Invokes(action, &v1alpha3.ResourceClaimList{}) return err } // Patch applies the patch and returns the patched resourceClaim. -func (c *FakeResourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaim, err error) { + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. 
-func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -156,18 +162,19 @@ func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev if name == nil { return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -179,11 +186,12 @@ func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *res if name == nil { return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go similarity index 58% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go index 2a1b4554eb..28db7261f9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go @@ -23,51 +23,53 @@ import ( json "encoding/json" "fmt" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" testing "k8s.io/client-go/testing" ) // FakeResourceClaimTemplates implements ResourceClaimTemplateInterface type FakeResourceClaimTemplates struct { - Fake *FakeResourceV1alpha2 + Fake *FakeResourceV1alpha3 ns string } -var resourceclaimtemplatesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates") +var resourceclaimtemplatesResource = v1alpha3.SchemeGroupVersion.WithResource("resourceclaimtemplates") -var resourceclaimtemplatesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimTemplate") +var resourceclaimtemplatesKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimTemplate") // Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. -func (c *FakeResourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { + emptyResult := &v1alpha3.ResourceClaimTemplate{} obj, err := c.Fake. - Invokes(testing.NewGetAction(resourceclaimtemplatesResource, c.ns, name), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewGetActionWithOptions(resourceclaimtemplatesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaimTemplate), err + return obj.(*v1alpha3.ResourceClaimTemplate), err } // List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. 
-func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { +func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceClaimTemplateList, err error) { + emptyResult := &v1alpha3.ResourceClaimTemplateList{} obj, err := c.Fake. - Invokes(testing.NewListAction(resourceclaimtemplatesResource, resourceclaimtemplatesKind, c.ns, opts), &v1alpha2.ResourceClaimTemplateList{}) + Invokes(testing.NewListActionWithOptions(resourceclaimtemplatesResource, resourceclaimtemplatesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } - list := &v1alpha2.ResourceClaimTemplateList{ListMeta: obj.(*v1alpha2.ResourceClaimTemplateList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClaimTemplateList).Items { + list := &v1alpha3.ResourceClaimTemplateList{ListMeta: obj.(*v1alpha3.ResourceClaimTemplateList).ListMeta} + for _, item := range obj.(*v1alpha3.ResourceClaimTemplateList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -78,61 +80,64 @@ func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptio // Watch returns a watch.Interface that watches the requested resourceClaimTemplates. func (c *FakeResourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(resourceclaimtemplatesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(resourceclaimtemplatesResource, c.ns, opts)) } // Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *FakeResourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { + emptyResult := &v1alpha3.ResourceClaimTemplate{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewCreateActionWithOptions(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaimTemplate), err + return obj.(*v1alpha3.ResourceClaimTemplate), err } // Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *FakeResourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { + emptyResult := &v1alpha3.ResourceClaimTemplate{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewUpdateActionWithOptions(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaimTemplate), err + return obj.(*v1alpha3.ResourceClaimTemplate), err } // Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs. func (c *FakeResourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimtemplatesResource, c.ns, name, opts), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewDeleteActionWithOptions(resourceclaimtemplatesResource, c.ns, name, opts), &v1alpha3.ResourceClaimTemplate{}) return err } // DeleteCollection deletes a collection of objects. func (c *FakeResourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourceclaimtemplatesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(resourceclaimtemplatesResource, c.ns, opts, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimTemplateList{}) + _, err := c.Fake.Invokes(action, &v1alpha3.ResourceClaimTemplateList{}) return err } // Patch applies the patch and returns the patched resourceClaimTemplate. -func (c *FakeResourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaimTemplate, err error) { + emptyResult := &v1alpha3.ResourceClaimTemplate{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimtemplatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaimTemplate), err + return obj.(*v1alpha3.ResourceClaimTemplate), err } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. -func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { if resourceClaimTemplate == nil { return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") } @@ -144,11 +149,12 @@ func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTem if name == nil { return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") } + emptyResult := &v1alpha3.ResourceClaimTemplate{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimtemplatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaimTemplate), err + return obj.(*v1alpha3.ResourceClaimTemplate), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go similarity index 59% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceslice.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go index 325e729e92..c355fc454a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go @@ -23,48 +23,50 @@ import ( json "encoding/json" "fmt" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" testing "k8s.io/client-go/testing" ) // FakeResourceSlices implements ResourceSliceInterface type FakeResourceSlices struct { - Fake *FakeResourceV1alpha2 + Fake *FakeResourceV1alpha3 } -var resourceslicesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceslices") +var resourceslicesResource = v1alpha3.SchemeGroupVersion.WithResource("resourceslices") -var resourceslicesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceSlice") +var resourceslicesKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceSlice") // Get takes name of the resourceSlice, and returns the corresponding resourceSlice object, and an error if there is any. -func (c *FakeResourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceSlice, err error) { +func (c *FakeResourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceSlice, err error) { + emptyResult := &v1alpha3.ResourceSlice{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(resourceslicesResource, name), &v1alpha2.ResourceSlice{}) + Invokes(testing.NewRootGetActionWithOptions(resourceslicesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceSlice), err + return obj.(*v1alpha3.ResourceSlice), err } // List takes label and field selectors, and returns the list of ResourceSlices that match those selectors. -func (c *FakeResourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceSliceList, err error) { +func (c *FakeResourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceSliceList, err error) { + emptyResult := &v1alpha3.ResourceSliceList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(resourceslicesResource, resourceslicesKind, opts), &v1alpha2.ResourceSliceList{}) + Invokes(testing.NewRootListActionWithOptions(resourceslicesResource, resourceslicesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } - list := &v1alpha2.ResourceSliceList{ListMeta: obj.(*v1alpha2.ResourceSliceList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceSliceList).Items { + list := &v1alpha3.ResourceSliceList{ListMeta: obj.(*v1alpha3.ResourceSliceList).ListMeta} + for _, item := range obj.(*v1alpha3.ResourceSliceList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -75,56 +77,59 @@ func (c *FakeResourceSlices) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested resourceSlices. func (c *FakeResourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(resourceslicesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(resourceslicesResource, opts)) } // Create takes the representation of a resourceSlice and creates it. Returns the server's representation of the resourceSlice, and an error, if there is any. -func (c *FakeResourceSlices) Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (result *v1alpha2.ResourceSlice, err error) { +func (c *FakeResourceSlices) Create(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.CreateOptions) (result *v1alpha3.ResourceSlice, err error) { + emptyResult := &v1alpha3.ResourceSlice{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(resourceslicesResource, resourceSlice), &v1alpha2.ResourceSlice{}) + Invokes(testing.NewRootCreateActionWithOptions(resourceslicesResource, resourceSlice, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceSlice), err + return obj.(*v1alpha3.ResourceSlice), err } // Update takes the representation of a resourceSlice and updates it. Returns the server's representation of the resourceSlice, and an error, if there is any. -func (c *FakeResourceSlices) Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha2.ResourceSlice, err error) { +func (c *FakeResourceSlices) Update(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha3.ResourceSlice, err error) { + emptyResult := &v1alpha3.ResourceSlice{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(resourceslicesResource, resourceSlice), &v1alpha2.ResourceSlice{}) + Invokes(testing.NewRootUpdateActionWithOptions(resourceslicesResource, resourceSlice, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceSlice), err + return obj.(*v1alpha3.ResourceSlice), err } // Delete takes name of the resourceSlice and deletes it. Returns an error if one occurs. func (c *FakeResourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(resourceslicesResource, name, opts), &v1alpha2.ResourceSlice{}) + Invokes(testing.NewRootDeleteActionWithOptions(resourceslicesResource, name, opts), &v1alpha3.ResourceSlice{}) return err } // DeleteCollection deletes a collection of objects. func (c *FakeResourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(resourceslicesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(resourceslicesResource, opts, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceSliceList{}) + _, err := c.Fake.Invokes(action, &v1alpha3.ResourceSliceList{}) return err } // Patch applies the patch and returns the patched resourceSlice. -func (c *FakeResourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error) { +func (c *FakeResourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceSlice, err error) { + emptyResult := &v1alpha3.ResourceSlice{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(resourceslicesResource, name, pt, data, subresources...), &v1alpha2.ResourceSlice{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(resourceslicesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceSlice), err + return obj.(*v1alpha3.ResourceSlice), err } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceSlice. -func (c *FakeResourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error) { +func (c *FakeResourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceSlice, err error) { if resourceSlice == nil { return nil, fmt.Errorf("resourceSlice provided to Apply must not be nil") } @@ -136,10 +141,11 @@ func (c *FakeResourceSlices) Apply(ctx context.Context, resourceSlice *resourcev if name == nil { return nil, fmt.Errorf("resourceSlice.Name must be provided to Apply") } + emptyResult := &v1alpha3.ResourceSlice{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(resourceslicesResource, *name, types.ApplyPatchType, data), &v1alpha2.ResourceSlice{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(resourceslicesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceSlice), err + return obj.(*v1alpha3.ResourceSlice), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go similarity index 83% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go index d11410bb9b..747e564b76 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go @@ -16,18 +16,14 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 + +type DeviceClassExpansion interface{} type PodSchedulingContextExpansion interface{} type ResourceClaimExpansion interface{} -type ResourceClaimParametersExpansion interface{} - type ResourceClaimTemplateExpansion interface{} -type ResourceClassExpansion interface{} - -type ResourceClassParametersExpansion interface{} - type ResourceSliceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go new file mode 100644 index 0000000000..af59843212 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. +// A group's client should implement this interface. +type PodSchedulingContextsGetter interface { + PodSchedulingContexts(namespace string) PodSchedulingContextInterface +} + +// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. 
+type PodSchedulingContextInterface interface { + Create(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha3.PodSchedulingContext, error) + Update(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.PodSchedulingContext, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.PodSchedulingContextList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.PodSchedulingContext, err error) + Apply(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) + PodSchedulingContextExpansion +} + +// podSchedulingContexts implements PodSchedulingContextInterface +type podSchedulingContexts struct { + *gentype.ClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration] +} + +// newPodSchedulingContexts returns a PodSchedulingContexts +func newPodSchedulingContexts(c *ResourceV1alpha3Client, namespace string) *podSchedulingContexts { + return &podSchedulingContexts{ + gentype.NewClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration]( + "podschedulingcontexts", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha3.PodSchedulingContext { return &v1alpha3.PodSchedulingContext{} }, + func() *v1alpha3.PodSchedulingContextList { return &v1alpha3.PodSchedulingContextList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go similarity index 61% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go index 8e258b3e1c..879f0990d7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go @@ -16,64 +16,54 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package v1alpha2 +package v1alpha3 import ( "net/http" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type ResourceV1alpha2Interface interface { +type ResourceV1alpha3Interface interface { RESTClient() rest.Interface + DeviceClassesGetter PodSchedulingContextsGetter ResourceClaimsGetter - ResourceClaimParametersGetter ResourceClaimTemplatesGetter - ResourceClassesGetter - ResourceClassParametersGetter ResourceSlicesGetter } -// ResourceV1alpha2Client is used to interact with features provided by the resource.k8s.io group. -type ResourceV1alpha2Client struct { +// ResourceV1alpha3Client is used to interact with features provided by the resource.k8s.io group. +type ResourceV1alpha3Client struct { restClient rest.Interface } -func (c *ResourceV1alpha2Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { - return newPodSchedulingContexts(c, namespace) +func (c *ResourceV1alpha3Client) DeviceClasses() DeviceClassInterface { + return newDeviceClasses(c) } -func (c *ResourceV1alpha2Client) ResourceClaims(namespace string) ResourceClaimInterface { - return newResourceClaims(c, namespace) +func (c *ResourceV1alpha3Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { + return newPodSchedulingContexts(c, namespace) } -func (c *ResourceV1alpha2Client) ResourceClaimParameters(namespace string) ResourceClaimParametersInterface { - return newResourceClaimParameters(c, namespace) +func (c *ResourceV1alpha3Client) ResourceClaims(namespace string) ResourceClaimInterface { + return newResourceClaims(c, namespace) } -func (c *ResourceV1alpha2Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { +func (c *ResourceV1alpha3Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { return newResourceClaimTemplates(c, namespace) } -func (c *ResourceV1alpha2Client) ResourceClasses() ResourceClassInterface { - return newResourceClasses(c) -} - -func (c *ResourceV1alpha2Client) ResourceClassParameters(namespace string) ResourceClassParametersInterface { - return newResourceClassParameters(c, namespace) -} - -func (c *ResourceV1alpha2Client) ResourceSlices() ResourceSliceInterface { +func (c *ResourceV1alpha3Client) ResourceSlices() ResourceSliceInterface { return newResourceSlices(c) } -// NewForConfig creates a new ResourceV1alpha2Client for the given config. +// NewForConfig creates a new ResourceV1alpha3Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*ResourceV1alpha2Client, error) { +func NewForConfig(c *rest.Config) (*ResourceV1alpha3Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -85,9 +75,9 @@ func NewForConfig(c *rest.Config) (*ResourceV1alpha2Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new ResourceV1alpha2Client for the given config and http client. +// NewForConfigAndClient creates a new ResourceV1alpha3Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. 
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha2Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha3Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -96,12 +86,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha2Cli if err != nil { return nil, err } - return &ResourceV1alpha2Client{client}, nil + return &ResourceV1alpha3Client{client}, nil } -// NewForConfigOrDie creates a new ResourceV1alpha2Client for the given config and +// NewForConfigOrDie creates a new ResourceV1alpha3Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha2Client { +func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha3Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -109,13 +99,13 @@ func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha2Client { return client } -// New creates a new ResourceV1alpha2Client for the given RESTClient. -func New(c rest.Interface) *ResourceV1alpha2Client { - return &ResourceV1alpha2Client{c} +// New creates a new ResourceV1alpha3Client for the given RESTClient. +func New(c rest.Interface) *ResourceV1alpha3Client { + return &ResourceV1alpha3Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha2.SchemeGroupVersion + gv := v1alpha3.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() @@ -129,7 +119,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *ResourceV1alpha2Client) RESTClient() rest.Interface { +func (c *ResourceV1alpha3Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go new file mode 100644 index 0000000000..2ac65c005e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimsGetter has a method to return a ResourceClaimInterface. +// A group's client should implement this interface. +type ResourceClaimsGetter interface { + ResourceClaims(namespace string) ResourceClaimInterface +} + +// ResourceClaimInterface has methods to work with ResourceClaim resources. 
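For downstream callers, the ResourceV1alpha2Client to ResourceV1alpha3Client rename above means a new clientset accessor and a smaller method set: DeviceClasses replaces the removed ResourceClasses client, and the separate ResourceClaimParameters / ResourceClassParameters clients are gone. The following is a minimal sketch (not part of the vendored code) of what calling code looks like against the new surface, assuming the client-go version vendored in this diff and a standard kubernetes.Clientset; the kubeconfig path is only a placeholder.

// Sketch only, not part of the vendored diff. The kubeconfig path is a placeholder.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Cluster-scoped: DeviceClasses is the v1alpha3 counterpart of the removed
	// ResourceClasses client, so no namespace argument.
	classes, err := cs.ResourceV1alpha3().DeviceClasses().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("device classes:", len(classes.Items))

	// Namespaced: ResourceClaims keeps the same getter shape as before.
	claims, err := cs.ResourceV1alpha3().ResourceClaims("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("resource claims:", len(claims.Items))
}

This mirrors the getters in resource_client.go above: DeviceClasses and ResourceSlices are cluster-scoped, while ResourceClaims, ResourceClaimTemplates and PodSchedulingContexts remain namespaced.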
+type ResourceClaimInterface interface { + Create(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.CreateOptions) (*v1alpha3.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) + ResourceClaimExpansion +} + +// resourceClaims implements ResourceClaimInterface +type resourceClaims struct { + *gentype.ClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration] +} + +// newResourceClaims returns a ResourceClaims +func newResourceClaims(c *ResourceV1alpha3Client, namespace string) *resourceClaims { + return &resourceClaims{ + gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration]( + "resourceclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha3.ResourceClaim { return &v1alpha3.ResourceClaim{} }, + func() *v1alpha3.ResourceClaimList { return &v1alpha3.ResourceClaimList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go new file mode 100644 index 0000000000..87997bfee5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface. +// A group's client should implement this interface. +type ResourceClaimTemplatesGetter interface { + ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface +} + +// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. +type ResourceClaimTemplateInterface interface { + Create(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha3.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha3.ResourceClaimTemplate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimTemplateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaimTemplate, err error) + ResourceClaimTemplateExpansion +} + +// resourceClaimTemplates implements ResourceClaimTemplateInterface +type resourceClaimTemplates struct { + *gentype.ClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration] +} + +// newResourceClaimTemplates returns a ResourceClaimTemplates +func newResourceClaimTemplates(c *ResourceV1alpha3Client, namespace string) *resourceClaimTemplates { + return &resourceClaimTemplates{ + gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration]( + "resourceclaimtemplates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha3.ResourceClaimTemplate { return &v1alpha3.ResourceClaimTemplate{} }, + func() *v1alpha3.ResourceClaimTemplateList { return &v1alpha3.ResourceClaimTemplateList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go new file mode 100644 index 0000000000..0819041408 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceSlicesGetter has a method to return a ResourceSliceInterface. +// A group's client should implement this interface. +type ResourceSlicesGetter interface { + ResourceSlices() ResourceSliceInterface +} + +// ResourceSliceInterface has methods to work with ResourceSlice resources. +type ResourceSliceInterface interface { + Create(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.CreateOptions) (*v1alpha3.ResourceSlice, error) + Update(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.UpdateOptions) (*v1alpha3.ResourceSlice, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceSliceList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceSlice, err error) + Apply(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceSlice, err error) + ResourceSliceExpansion +} + +// resourceSlices implements ResourceSliceInterface +type resourceSlices struct { + *gentype.ClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration] +} + +// newResourceSlices returns a ResourceSlices +func newResourceSlices(c *ResourceV1alpha3Client) *resourceSlices { + return &resourceSlices{ + gentype.NewClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration]( + "resourceslices", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha3.ResourceSlice { return &v1alpha3.ResourceSlice{} }, + func() *v1alpha3.ResourceSliceList { return &v1alpha3.ResourceSliceList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go index 40ab9fb407..92847184bc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go @@ -43,20 +43,22 @@ var priorityclassesKind = v1.SchemeGroupVersion.WithKind("PriorityClass") // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. 
func (c *FakePriorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { + emptyResult := &v1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1.PriorityClass{}) + Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityClass), err } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *FakePriorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { + emptyResult := &v1.PriorityClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1.PriorityClassList{}) + Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakePriorityClasses) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *FakePriorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts)) } // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) { + emptyResult := &v1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1.PriorityClass{}) + Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityClass), err } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) { + emptyResult := &v1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1.PriorityClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityClass), err } @@ -107,7 +111,7 @@ func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts meta // DeleteCollection deletes a collection of objects. 
func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PriorityClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts metav1. // Patch applies the patch and returns the patched priorityClass. func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) { + emptyResult := &v1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityClass), err } @@ -136,10 +141,11 @@ func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *scheduli if name == nil { return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") } + emptyResult := &v1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go index c68ec5da41..a28ef2fd4a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/scheduling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. 
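Two behavioral details in the fake-client rewrite above are worth noting: the fakes now record the *WithOptions action variants, so registered reactors can observe the caller's Get/List/Create/Update/Patch options, and when a reactor yields no object the typed wrapper returns a non-nil empty struct alongside the error instead of a nil pointer. Reactors keyed on verb and resource keep working unchanged. A small sketch against the fake scheduling/v1 client, offered as an illustration rather than a test from this repository:

// Sketch only; exercises the fake clientset, not a real API server.
package main

import (
	"context"
	"fmt"

	schedulingv1 "k8s.io/api/scheduling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	cs := fake.NewSimpleClientset(&schedulingv1.PriorityClass{
		ObjectMeta: metav1.ObjectMeta{Name: "high"},
		Value:      1000,
	})

	pc, err := cs.SchedulingV1().PriorityClasses().Get(context.TODO(), "high", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(pc.Name, pc.Value)

	// Every call is recorded as an action; with the *WithOptions constructors
	// above, the recorded actions also carry the options the caller passed.
	for _, a := range cs.Actions() {
		fmt.Println(a.GetVerb(), a.GetResource().Resource)
	}
}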
@@ -55,143 +52,18 @@ type PriorityClassInterface interface { // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.PriorityClass { return &v1.PriorityClass{} }, + func() *v1.PriorityClassList { return &v1.PriorityClassList{} }), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). 
- Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go index 3c8404a725..055d458a3c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go @@ -43,20 +43,22 @@ var priorityclassesKind = v1alpha1.SchemeGroupVersion.WithKind("PriorityClass") // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { + emptyResult := &v1alpha1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.PriorityClass), err } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { + emptyResult := &v1alpha1.PriorityClassList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1alpha1.PriorityClassList{}) + Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (re // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts)) } // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) { + emptyResult := &v1alpha1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.PriorityClass), err } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) { + emptyResult := &v1alpha1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.PriorityClass), err } @@ -107,7 +111,7 @@ func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.D // DeleteCollection deletes a collection of objects. func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.PriorityClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.Dele // Patch applies the patch and returns the patched priorityClass. func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) { + emptyResult := &v1alpha1.PriorityClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.PriorityClass), err } @@ -136,10 +141,11 @@ func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *scheduli if name == nil { return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") } + emptyResult := &v1alpha1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.PriorityClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index a9b8c19c78..5c78f3de9f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" schedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. @@ -55,143 +52,18 @@ type PriorityClassInterface interface { // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.PriorityClass { return &v1alpha1.PriorityClass{} }, + func() *v1alpha1.PriorityClassList { return &v1alpha1.PriorityClassList{} }), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. 
-func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. 
-func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1alpha1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go index 4cf2e26c77..49d82a7ed9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go @@ -43,20 +43,22 @@ var priorityclassesKind = v1beta1.SchemeGroupVersion.WithKind("PriorityClass") // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { + emptyResult := &v1beta1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityClass), err } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { + emptyResult := &v1beta1.PriorityClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1beta1.PriorityClassList{}) + Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (re // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts)) } // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) { + emptyResult := &v1beta1.PriorityClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityClass), err } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) { + emptyResult := &v1beta1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityClass), err } @@ -107,7 +111,7 @@ func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.D // DeleteCollection deletes a collection of objects. func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.PriorityClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.Dele // Patch applies the patch and returns the patched priorityClass. func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) { + emptyResult := &v1beta1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityClass), err } @@ -136,10 +141,11 @@ func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *scheduli if name == nil { return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") } + emptyResult := &v1beta1.PriorityClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go index 155476e4c7..9fef1d7596 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" schedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. @@ -55,143 +52,18 @@ type PriorityClassInterface interface { // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.PriorityClass { return &v1beta1.PriorityClass{} }, + func() *v1beta1.PriorityClassList { return &v1beta1.PriorityClassList{} }), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1beta1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go index d9dc4151e2..2e14db6000 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIDriversGetter has a method to return a CSIDriverInterface. @@ -55,143 +52,18 @@ type CSIDriverInterface interface { // cSIDrivers implements CSIDriverInterface type cSIDrivers struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration] } // newCSIDrivers returns a CSIDrivers func newCSIDrivers(c *StorageV1Client) *cSIDrivers { return &cSIDrivers{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration]( + "csidrivers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.CSIDriver { return &v1.CSIDriver{} }, + func() *v1.CSIDriverList { return &v1.CSIDriverList{} }), } } - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *cSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Get(). - Resource("csidrivers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *cSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CSIDriverList{} - err = c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *cSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Post(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIDriver and updates it. 
Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Put(). - Resource("csidrivers"). - Name(cSIDriver.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *cSIDrivers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("csidrivers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csidrivers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Patch(pt). - Resource("csidrivers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *cSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - result = &v1.CSIDriver{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csidrivers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go index 17dbc8c1c8..6d28d7ed11 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSINodesGetter has a method to return a CSINodeInterface. 
@@ -55,143 +52,18 @@ type CSINodeInterface interface { // cSINodes implements CSINodeInterface type cSINodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration] } // newCSINodes returns a CSINodes func newCSINodes(c *StorageV1Client) *cSINodes { return &cSINodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration]( + "csinodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.CSINode { return &v1.CSINode{} }, + func() *v1.CSINodeList { return &v1.CSINodeList{} }), } } - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Get(). - Resource("csinodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CSINodeList{} - err = c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Post(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Put(). - Resource("csinodes"). - Name(cSINode.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *cSINodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("csinodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *cSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csinodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Patch(pt). - Resource("csinodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. -func (c *cSINodes) Apply(ctx context.Context, cSINode *storagev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - result = &v1.CSINode{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csinodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go index 6bb50e0da9..8a762b9fff 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. 
@@ -55,154 +52,18 @@ type CSIStorageCapacityInterface interface { // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.CSIStorageCapacity { return &v1.CSIStorageCapacity{} }, + func() *v1.CSIStorageCapacityList { return &v1.CSIStorageCapacityList{} }), } } - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *cSIStorageCapacities) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIStorageCapacityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CSIStorageCapacityList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Post(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Put(). - Namespace(c.ns). 
- Resource("csistoragecapacities"). - Name(cSIStorageCapacity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - result = &v1.CSIStorageCapacity{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go index 4983227376..1df7c034bb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go @@ -43,20 +43,22 @@ var csidriversKind = v1.SchemeGroupVersion.WithKind("CSIDriver") // Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) { + emptyResult := &v1.CSIDriver{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(csidriversResource, name), &v1.CSIDriver{}) + Invokes(testing.NewRootGetActionWithOptions(csidriversResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIDriver), err } // List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. func (c *FakeCSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) { + emptyResult := &v1.CSIDriverList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(csidriversResource, csidriversKind, opts), &v1.CSIDriverList{}) + Invokes(testing.NewRootListActionWithOptions(csidriversResource, csidriversKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeCSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (res // Watch returns a watch.Interface that watches the requested cSIDrivers. func (c *FakeCSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csidriversResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(csidriversResource, opts)) } // Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) { + emptyResult := &v1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &v1.CSIDriver{}) + Invokes(testing.NewRootCreateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIDriver), err } // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) { + emptyResult := &v1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &v1.CSIDriver{}) + Invokes(testing.NewRootUpdateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIDriver), err } @@ -107,7 +111,7 @@ func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts metav1.De // DeleteCollection deletes a collection of objects. func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csidriversResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(csidriversResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.CSIDriverList{}) return err @@ -115,10 +119,11 @@ func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts metav1.Delet // Patch applies the patch and returns the patched cSIDriver. func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) { + emptyResult := &v1.CSIDriver{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &v1.CSIDriver{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIDriver), err } @@ -136,10 +141,11 @@ func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1.CSIDriv if name == nil { return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") } + emptyResult := &v1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, *name, types.ApplyPatchType, data), &v1.CSIDriver{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIDriver), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go index 0271a20f3d..e2b8e8cc8d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go @@ -43,20 +43,22 @@ var csinodesKind = v1.SchemeGroupVersion.WithKind("CSINode") // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. func (c *FakeCSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) { + emptyResult := &v1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(csinodesResource, name), &v1.CSINode{}) + Invokes(testing.NewRootGetActionWithOptions(csinodesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSINode), err } // List takes label and field selectors, and returns the list of CSINodes that match those selectors. func (c *FakeCSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) { + emptyResult := &v1.CSINodeList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &v1.CSINodeList{}) + Invokes(testing.NewRootListActionWithOptions(csinodesResource, csinodesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeCSINodes) List(ctx context.Context, opts metav1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested cSINodes. func (c *FakeCSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(csinodesResource, opts)) } // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) { + emptyResult := &v1.CSINode{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &v1.CSINode{}) + Invokes(testing.NewRootCreateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSINode), err } // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) { + emptyResult := &v1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &v1.CSINode{}) + Invokes(testing.NewRootUpdateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSINode), err } @@ -107,7 +111,7 @@ func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts metav1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csinodesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(csinodesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.CSINodeList{}) return err @@ -115,10 +119,11 @@ func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteO // Patch applies the patch and returns the patched cSINode. func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) { + emptyResult := &v1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &v1.CSINode{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSINode), err } @@ -136,10 +141,11 @@ func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *storagev1.CSINodeAppl if name == nil { return nil, fmt.Errorf("cSINode.Name must be provided to Apply") } + emptyResult := &v1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, *name, types.ApplyPatchType, data), &v1.CSINode{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSINode), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go index b12bbe3c15..a86014855e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go @@ -44,22 +44,24 @@ var csistoragecapacitiesKind = v1.SchemeGroupVersion.WithKind("CSIStorageCapacit // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. 
func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIStorageCapacity, err error) { + emptyResult := &v1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1.CSIStorageCapacity{}) + Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIStorageCapacity), err } // List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIStorageCapacityList, err error) { + emptyResult := &v1.CSIStorageCapacityList{} obj, err := c.Fake. - Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1.CSIStorageCapacityList{}) + Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts metav1.ListOpt // Watch returns a watch.Interface that watches the requested cSIStorageCapacities. func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts)) } // Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (result *v1.CSIStorageCapacity, err error) { + emptyResult := &v1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1.CSIStorageCapacity{}) + Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIStorageCapacity), err } // Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (result *v1.CSIStorageCapacity, err error) { + emptyResult := &v1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1.CSIStorageCapacity{}) + Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIStorageCapacity), err } @@ -114,7 +118,7 @@ func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. 
func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.CSIStorageCapacityList{}) return err @@ -122,11 +126,12 @@ func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts me // Patch applies the patch and returns the patched cSIStorageCapacity. func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) { + emptyResult := &v1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIStorageCapacity), err } @@ -144,11 +149,12 @@ func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity if name == nil { return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") } + emptyResult := &v1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIStorageCapacity), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go index e232f4c8d7..8910be1db9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go @@ -43,20 +43,22 @@ var storageclassesKind = v1.SchemeGroupVersion.WithKind("StorageClass") // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. func (c *FakeStorageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { + emptyResult := &v1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1.StorageClass{}) + Invokes(testing.NewRootGetActionWithOptions(storageclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StorageClass), err } // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *FakeStorageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) { + emptyResult := &v1.StorageClassList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1.StorageClassList{}) + Invokes(testing.NewRootListActionWithOptions(storageclassesResource, storageclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeStorageClasses) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested storageClasses. func (c *FakeStorageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(storageclassesResource, opts)) } // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) { + emptyResult := &v1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1.StorageClass{}) + Invokes(testing.NewRootCreateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StorageClass), err } // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) { + emptyResult := &v1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1.StorageClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StorageClass), err } @@ -107,7 +111,7 @@ func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts metav // DeleteCollection deletes a collection of objects. func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(storageclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.StorageClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts metav1.D // Patch applies the patch and returns the patched storageClass. func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) { + emptyResult := &v1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StorageClass), err } @@ -136,10 +141,11 @@ func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *storagev1. 
if name == nil { return nil, fmt.Errorf("storageClass.Name must be provided to Apply") } + emptyResult := &v1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, *name, types.ApplyPatchType, data), &v1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StorageClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go index 3f5f2aec57..3d3d71ec50 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go @@ -43,20 +43,22 @@ var volumeattachmentsKind = v1.SchemeGroupVersion.WithKind("VolumeAttachment") // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1.VolumeAttachment{}) + Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *FakeVolumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { + emptyResult := &v1.VolumeAttachmentList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1.VolumeAttachmentList{}) + Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeVolumeAttachments) List(ctx context.Context, opts metav1.ListOption // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts)) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) { + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1.VolumeAttachment{}) + Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } @@ -118,7 +123,7 @@ func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts me // DeleteCollection deletes a collection of objects. func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.VolumeAttachmentList{}) return err @@ -126,10 +131,11 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts metav // Patch applies the patch and returns the patched volumeAttachment. func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) { + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } @@ -147,10 +153,11 @@ func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *sto if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } @@ -169,10 +176,11 @@ func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachmen if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 8e97d90a0f..d7b6ff68aa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageClassesGetter has a method to return a StorageClassInterface. @@ -55,143 +52,18 @@ type StorageClassInterface interface { // storageClasses implements StorageClassInterface type storageClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration] } // newStorageClasses returns a StorageClasses func newStorageClasses(c *StorageV1Client) *storageClasses { return &storageClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration]( + "storageclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.StorageClass { return &v1.StorageClass{} }, + func() *v1.StorageClassList { return &v1.StorageClassList{} }), } } - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. 
-func (c *storageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Post(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - Name(name). 
- SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *storageClasses) Apply(ctx context.Context, storageClass *storagev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - result = &v1.StorageClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go index c1dbec84f4..3a0404284f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface { type VolumeAttachmentInterface interface { Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (*v1.VolumeAttachment, error) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.VolumeAttachment { return &v1.VolumeAttachment{} }, + func() *v1.VolumeAttachmentList { return &v1.VolumeAttachmentList{} }), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). 
- Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go index bf5d64dddc..6819deff62 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. @@ -55,154 +52,18 @@ type CSIStorageCapacityInterface interface { // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1alpha1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.CSIStorageCapacity { return &v1alpha1.CSIStorageCapacity{} }, + func() *v1alpha1.CSIStorageCapacityList { return &v1alpha1.CSIStorageCapacityList{} }), } } - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. 
-func (c *cSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CSIStorageCapacityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.CSIStorageCapacityList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Post(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Put(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(cSIStorageCapacity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - SubResource(subresources...). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go index c1614cda7d..0bcaccd208 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go @@ -44,22 +44,24 @@ var csistoragecapacitiesKind = v1alpha1.SchemeGroupVersion.WithKind("CSIStorageC // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CSIStorageCapacity, err error) { + emptyResult := &v1alpha1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1alpha1.CSIStorageCapacity{}) + Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.CSIStorageCapacity), err } // List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CSIStorageCapacityList, err error) { + emptyResult := &v1alpha1.CSIStorageCapacityList{} obj, err := c.Fake. - Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1alpha1.CSIStorageCapacityList{}) + Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions // Watch returns a watch.Interface that watches the requested cSIStorageCapacities. func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts)) } // Create takes the representation of a cSIStorageCapacity and creates it. 
Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { + emptyResult := &v1alpha1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1alpha1.CSIStorageCapacity{}) + Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.CSIStorageCapacity), err } // Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { + emptyResult := &v1alpha1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1alpha1.CSIStorageCapacity{}) + Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.CSIStorageCapacity), err } @@ -114,7 +118,7 @@ func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.CSIStorageCapacityList{}) return err @@ -122,11 +126,12 @@ func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1 // Patch applies the patch and returns the patched cSIStorageCapacity. func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) { + emptyResult := &v1alpha1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1alpha1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.CSIStorageCapacity), err } @@ -144,11 +149,12 @@ func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity if name == nil { return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") } + emptyResult := &v1alpha1.CSIStorageCapacity{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.CSIStorageCapacity), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go index 9725d6d10b..a07247f8f3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go @@ -43,20 +43,22 @@ var volumeattachmentsKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAttachme // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { + emptyResult := &v1alpha1.VolumeAttachmentList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1alpha1.VolumeAttachmentList{}) + Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) ( // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts)) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) { + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } // Update takes the representation of a volumeAttachment and updates it. 
Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } @@ -118,7 +123,7 @@ func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1 // DeleteCollection deletes a collection of objects. func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttachmentList{}) return err @@ -126,10 +131,11 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.De // Patch applies the patch and returns the patched volumeAttachment. func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } @@ -147,10 +153,11 @@ func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *sto if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } @@ -169,10 +176,11 @@ func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachmen if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go index d25263df48..0d7fe9aa8c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go @@ -43,20 +43,22 @@ var volumeattributesclassesKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAt // Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. func (c *FakeVolumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + emptyResult := &v1alpha1.VolumeAttributesClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumeattributesclassesResource, name), &v1alpha1.VolumeAttributesClass{}) + Invokes(testing.NewRootGetActionWithOptions(volumeattributesclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttributesClass), err } // List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { + emptyResult := &v1alpha1.VolumeAttributesClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumeattributesclassesResource, volumeattributesclassesKind, opts), &v1alpha1.VolumeAttributesClassList{}) + Invokes(testing.NewRootListActionWithOptions(volumeattributesclassesResource, volumeattributesclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOpti // Watch returns a watch.Interface that watches the requested volumeAttributesClasses. func (c *FakeVolumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(testing.NewRootWatchAction(volumeattributesclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattributesclassesResource, opts)) } // Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. func (c *FakeVolumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + emptyResult := &v1alpha1.VolumeAttributesClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{}) + Invokes(testing.NewRootCreateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttributesClass), err } // Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. func (c *FakeVolumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + emptyResult := &v1alpha1.VolumeAttributesClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttributesClass), err } @@ -107,7 +111,7 @@ func (c *FakeVolumeAttributesClasses) Delete(ctx context.Context, name string, o // DeleteCollection deletes a collection of objects. func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattributesclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(volumeattributesclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttributesClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts // Patch applies the patch and returns the patched volumeAttributesClass. func (c *FakeVolumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { + emptyResult := &v1alpha1.VolumeAttributesClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, name, pt, data, subresources...), &v1alpha1.VolumeAttributesClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttributesClass), err } @@ -136,10 +141,11 @@ func (c *FakeVolumeAttributesClasses) Apply(ctx context.Context, volumeAttribute if name == nil { return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") } + emptyResult := &v1alpha1.VolumeAttributesClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.VolumeAttributesClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttributesClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index 58abb748f9..0982d5568a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface { type VolumeAttachmentInterface interface { Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (*v1alpha1.VolumeAttachment, error) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.VolumeAttachment { return &v1alpha1.VolumeAttachment{} }, + func() *v1alpha1.VolumeAttachmentList { return &v1alpha1.VolumeAttachmentList{} }), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). 
- Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go index 6633a4dc15..40cff75883 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface. @@ -55,143 +52,18 @@ type VolumeAttributesClassInterface interface { // volumeAttributesClasses implements VolumeAttributesClassInterface type volumeAttributesClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration] } // newVolumeAttributesClasses returns a VolumeAttributesClasses func newVolumeAttributesClasses(c *StorageV1alpha1Client) *volumeAttributesClasses { return &volumeAttributesClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration]( + "volumeattributesclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.VolumeAttributesClass { return &v1alpha1.VolumeAttributesClass{} }, + func() *v1alpha1.VolumeAttributesClassList { return &v1alpha1.VolumeAttributesClassList{} }), } } - -// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. -func (c *volumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Get(). - Resource("volumeattributesclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. 
-func (c *volumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.VolumeAttributesClassList{} - err = c.client.Get(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. -func (c *volumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *volumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Post(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttributesClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *volumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Put(). - Resource("volumeattributesclasses"). - Name(volumeAttributesClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttributesClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. -func (c *volumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattributesclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattributesclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttributesClass. -func (c *volumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Patch(pt). - Resource("volumeattributesclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). 
- Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. -func (c *volumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - if volumeAttributesClass == nil { - return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttributesClass) - if err != nil { - return nil, err - } - name := volumeAttributesClass.Name - if name == nil { - return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") - } - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattributesclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go index 04e677db05..2748919b40 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIDriversGetter has a method to return a CSIDriverInterface. @@ -55,143 +52,18 @@ type CSIDriverInterface interface { // cSIDrivers implements CSIDriverInterface type cSIDrivers struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration] } // newCSIDrivers returns a CSIDrivers func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers { return &cSIDrivers{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration]( + "csidrivers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.CSIDriver { return &v1beta1.CSIDriver{} }, + func() *v1beta1.CSIDriverList { return &v1beta1.CSIDriverList{} }), } } - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *cSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Get(). - Resource("csidrivers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *cSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSIDriverList{} - err = c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *cSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Post(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Put(). - Resource("csidrivers"). - Name(cSIDriver.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *cSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csidrivers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csidrivers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Patch(pt). - Resource("csidrivers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *cSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - result = &v1beta1.CSIDriver{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csidrivers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go index c3760b5ce5..fe6fe228e3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSINodesGetter has a method to return a CSINodeInterface. @@ -55,143 +52,18 @@ type CSINodeInterface interface { // cSINodes implements CSINodeInterface type cSINodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration] } // newCSINodes returns a CSINodes func newCSINodes(c *StorageV1beta1Client) *cSINodes { return &cSINodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration]( + "csinodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.CSINode { return &v1beta1.CSINode{} }, + func() *v1beta1.CSINodeList { return &v1beta1.CSINodeList{} }), } } - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Get(). - Resource("csinodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSINodeList{} - err = c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Post(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSINode and updates it. 
Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Put(). - Resource("csinodes"). - Name(cSINode.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *cSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csinodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csinodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Patch(pt). - Resource("csinodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. -func (c *cSINodes) Apply(ctx context.Context, cSINode *storagev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - result = &v1beta1.CSINode{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csinodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go index 98ba936dc4..e9ffc1df92 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. 
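Reviewer note: every file in this part of the diff applies the same mechanical refactor. The hand-written Get/List/Watch/Create/Update/Delete/DeleteCollection/Patch/Apply methods are deleted, and the typed struct instead embeds *gentype.ClientWithListAndApply, constructed with the resource name, the REST client, scheme.ParameterCodec, the namespace (empty for cluster-scoped resources such as VolumeAttachment, the real namespace for namespaced ones such as CSIStorageCapacity), and factory functions for the object and list types. Callers of the typed clientset are unaffected. A rough usage sketch follows; the kubeconfig path and the "kube-system" namespace are illustrative assumptions, not taken from this diff.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path; adjust for your environment.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	ctx := context.Background()

	// Cluster-scoped resource: the typed client now embeds a gentype client
	// that was constructed with an empty namespace.
	vas, err := cs.StorageV1().VolumeAttachments().List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("volume attachments:", len(vas.Items))

	// Namespaced resource: the namespace is threaded through the gentype
	// constructor instead of a dedicated ns field, but callers still pass
	// it to the getter exactly as before.
	caps, err := cs.StorageV1beta1().CSIStorageCapacities("kube-system").List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("csi storage capacities:", len(caps.Items))
}
```

Both calls compile and behave as before the bump; only the plumbing behind the typed interfaces changed.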
@@ -55,154 +52,18 @@ type CSIStorageCapacityInterface interface { // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1beta1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.CSIStorageCapacity { return &v1beta1.CSIStorageCapacity{} }, + func() *v1beta1.CSIStorageCapacityList { return &v1beta1.CSIStorageCapacityList{} }), } } - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *cSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIStorageCapacityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSIStorageCapacityList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Post(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. 
-func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Put(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(cSIStorageCapacity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go index 4257aa6183..2b230707fe 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go @@ -43,20 +43,22 @@ var csidriversKind = v1beta1.SchemeGroupVersion.WithKind("CSIDriver") // Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. 
func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { + emptyResult := &v1beta1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(csidriversResource, name), &v1beta1.CSIDriver{}) + Invokes(testing.NewRootGetActionWithOptions(csidriversResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIDriver), err } // List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { + emptyResult := &v1beta1.CSIDriverList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(csidriversResource, csidriversKind, opts), &v1beta1.CSIDriverList{}) + Invokes(testing.NewRootListActionWithOptions(csidriversResource, csidriversKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested cSIDrivers. func (c *FakeCSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csidriversResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(csidriversResource, opts)) } // Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { + emptyResult := &v1beta1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{}) + Invokes(testing.NewRootCreateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIDriver), err } // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { + emptyResult := &v1beta1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{}) + Invokes(testing.NewRootUpdateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIDriver), err } @@ -107,7 +111,7 @@ func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts v1.Delete // DeleteCollection deletes a collection of objects. func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csidriversResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(csidriversResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.CSIDriverList{}) return err @@ -115,10 +119,11 @@ func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOpt // Patch applies the patch and returns the patched cSIDriver. 
func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { + emptyResult := &v1beta1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &v1beta1.CSIDriver{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIDriver), err } @@ -136,10 +141,11 @@ func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1beta1.CS if name == nil { return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") } + emptyResult := &v1beta1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, *name, types.ApplyPatchType, data), &v1beta1.CSIDriver{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIDriver), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go index d38c104bc1..c5c2b58250 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go @@ -43,20 +43,22 @@ var csinodesKind = v1beta1.SchemeGroupVersion.WithKind("CSINode") // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. func (c *FakeCSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { + emptyResult := &v1beta1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(csinodesResource, name), &v1beta1.CSINode{}) + Invokes(testing.NewRootGetActionWithOptions(csinodesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSINode), err } // List takes label and field selectors, and returns the list of CSINodes that match those selectors. func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { + emptyResult := &v1beta1.CSINodeList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &v1beta1.CSINodeList{}) + Invokes(testing.NewRootListActionWithOptions(csinodesResource, csinodesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v // Watch returns a watch.Interface that watches the requested cSINodes. func (c *FakeCSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(csinodesResource, opts)) } // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. 
func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { + emptyResult := &v1beta1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &v1beta1.CSINode{}) + Invokes(testing.NewRootCreateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSINode), err } // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { + emptyResult := &v1beta1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &v1beta1.CSINode{}) + Invokes(testing.NewRootUpdateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSINode), err } @@ -107,7 +111,7 @@ func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csinodesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(csinodesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.CSINodeList{}) return err @@ -115,10 +119,11 @@ func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptio // Patch applies the patch and returns the patched cSINode. func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { + emptyResult := &v1beta1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &v1beta1.CSINode{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSINode), err } @@ -136,10 +141,11 @@ func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *storagev1beta1.CSINod if name == nil { return nil, fmt.Errorf("cSINode.Name must be provided to Apply") } + emptyResult := &v1beta1.CSINode{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, *name, types.ApplyPatchType, data), &v1beta1.CSINode{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSINode), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go index d7bbb614b2..59a9aaf9df 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go @@ -44,22 +44,24 @@ var csistoragecapacitiesKind = v1beta1.SchemeGroupVersion.WithKind("CSIStorageCa // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIStorageCapacity, err error) { + emptyResult := &v1beta1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1beta1.CSIStorageCapacity{}) + Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIStorageCapacity), err } // List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIStorageCapacityList, err error) { + emptyResult := &v1beta1.CSIStorageCapacityList{} obj, err := c.Fake. - Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1beta1.CSIStorageCapacityList{}) + Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions // Watch returns a watch.Interface that watches the requested cSIStorageCapacities. func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts)) } // Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1beta1.CSIStorageCapacity, err error) { + emptyResult := &v1beta1.CSIStorageCapacity{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1beta1.CSIStorageCapacity{}) + Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIStorageCapacity), err } // Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1beta1.CSIStorageCapacity, err error) { + emptyResult := &v1beta1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1beta1.CSIStorageCapacity{}) + Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIStorageCapacity), err } @@ -114,7 +118,7 @@ func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.CSIStorageCapacityList{}) return err @@ -122,11 +126,12 @@ func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1 // Patch applies the patch and returns the patched cSIStorageCapacity. func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) { + emptyResult := &v1beta1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1beta1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIStorageCapacity), err } @@ -144,11 +149,12 @@ func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity if name == nil { return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") } + emptyResult := &v1beta1.CSIStorageCapacity{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIStorageCapacity), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go index 6b5bb02fda..470281607f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go @@ -48,6 +48,10 @@ func (c *FakeStorageV1beta1) VolumeAttachments() v1beta1.VolumeAttachmentInterfa return &FakeVolumeAttachments{c} } +func (c *FakeStorageV1beta1) VolumeAttributesClasses() v1beta1.VolumeAttributesClassInterface { + return &FakeVolumeAttributesClasses{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeStorageV1beta1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index 869e58b4f7..954a346081 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -43,20 +43,22 @@ var storageclassesKind = v1beta1.SchemeGroupVersion.WithKind("StorageClass") // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. func (c *FakeStorageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { + emptyResult := &v1beta1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1beta1.StorageClass{}) + Invokes(testing.NewRootGetActionWithOptions(storageclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StorageClass), err } // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { + emptyResult := &v1beta1.StorageClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1beta1.StorageClassList{}) + Invokes(testing.NewRootListActionWithOptions(storageclassesResource, storageclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested storageClasses. func (c *FakeStorageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(storageclassesResource, opts)) } // Create takes the representation of a storageClass and creates it. 
Returns the server's representation of the storageClass, and an error, if there is any. func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { + emptyResult := &v1beta1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) + Invokes(testing.NewRootCreateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StorageClass), err } // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { + emptyResult := &v1beta1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StorageClass), err } @@ -107,7 +111,7 @@ func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts v1.De // DeleteCollection deletes a collection of objects. func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(storageclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts v1.Delet // Patch applies the patch and returns the patched storageClass. func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { + emptyResult := &v1beta1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1beta1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StorageClass), err } @@ -136,10 +141,11 @@ func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *storagev1b if name == nil { return nil, fmt.Errorf("storageClass.Name must be provided to Apply") } + emptyResult := &v1beta1.StorageClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, *name, types.ApplyPatchType, data), &v1beta1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StorageClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go index e2b4a2eb1b..247f7ca627 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go @@ -43,20 +43,22 @@ var volumeattachmentsKind = v1beta1.SchemeGroupVersion.WithKind("VolumeAttachmen // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { + emptyResult := &v1beta1.VolumeAttachmentList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1beta1.VolumeAttachmentList{}) + Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) ( // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts)) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } // Update takes the representation of a volumeAttachment and updates it. 
Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } @@ -118,7 +123,7 @@ func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1 // DeleteCollection deletes a collection of objects. func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.VolumeAttachmentList{}) return err @@ -126,10 +131,11 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.De // Patch applies the patch and returns the patched volumeAttachment. func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } @@ -147,10 +153,11 @@ func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *sto if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } @@ -169,10 +176,11 @@ func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachmen if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go new file mode 100644 index 0000000000..3cef7291ab --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeAttributesClasses implements VolumeAttributesClassInterface +type FakeVolumeAttributesClasses struct { + Fake *FakeStorageV1beta1 +} + +var volumeattributesclassesResource = v1beta1.SchemeGroupVersion.WithResource("volumeattributesclasses") + +var volumeattributesclassesKind = v1beta1.SchemeGroupVersion.WithKind("VolumeAttributesClass") + +// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. +func (c *FakeVolumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttributesClass, err error) { + emptyResult := &v1beta1.VolumeAttributesClass{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(volumeattributesclassesResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.VolumeAttributesClass), err +} + +// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. 
+func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttributesClassList, err error) { + emptyResult := &v1beta1.VolumeAttributesClassList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(volumeattributesclassesResource, volumeattributesclassesKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.VolumeAttributesClassList{ListMeta: obj.(*v1beta1.VolumeAttributesClassList).ListMeta} + for _, item := range obj.(*v1beta1.VolumeAttributesClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. +func (c *FakeVolumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattributesclassesResource, opts)) +} + +// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. +func (c *FakeVolumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1beta1.VolumeAttributesClass, err error) { + emptyResult := &v1beta1.VolumeAttributesClass{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.VolumeAttributesClass), err +} + +// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. +func (c *FakeVolumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1beta1.VolumeAttributesClass, err error) { + emptyResult := &v1beta1.VolumeAttributesClass{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.VolumeAttributesClass), err +} + +// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. +func (c *FakeVolumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(volumeattributesclassesResource, name, opts), &v1beta1.VolumeAttributesClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(volumeattributesclassesResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.VolumeAttributesClassList{}) + return err +} + +// Patch applies the patch and returns the patched volumeAttributesClass. 
+func (c *FakeVolumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttributesClass, err error) { + emptyResult := &v1beta1.VolumeAttributesClass{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.VolumeAttributesClass), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. +func (c *FakeVolumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttributesClass, err error) { + if volumeAttributesClass == nil { + return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") + } + data, err := json.Marshal(volumeAttributesClass) + if err != nil { + return nil, err + } + name := volumeAttributesClass.Name + if name == nil { + return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") + } + emptyResult := &v1beta1.VolumeAttributesClass{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.VolumeAttributesClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go index 1a202a928e..ebf78e10bc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -27,3 +27,5 @@ type CSIStorageCapacityExpansion interface{} type StorageClassExpansion interface{} type VolumeAttachmentExpansion interface{} + +type VolumeAttributesClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 4c7604bd29..3d1b59e36c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -33,6 +33,7 @@ type StorageV1beta1Interface interface { CSIStorageCapacitiesGetter StorageClassesGetter VolumeAttachmentsGetter + VolumeAttributesClassesGetter } // StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group. @@ -60,6 +61,10 @@ func (c *StorageV1beta1Client) VolumeAttachments() VolumeAttachmentInterface { return newVolumeAttachments(c) } +func (c *StorageV1beta1Client) VolumeAttributesClasses() VolumeAttributesClassInterface { + return newVolumeAttributesClasses(c) +} + // NewForConfig creates a new StorageV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). 
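For context alongside the vendored files above: the VolumeAttributesClasses getter newly wired into StorageV1beta1Client (and its fake) can be exercised as in the minimal Go sketch below. This is illustrative only, assumes the k8s.io/client-go and k8s.io/api v0.31 modules pulled in by this update, and the "gold" class name and driver string are placeholder values, not anything defined in this diff.

package main

import (
	"context"
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed the fake clientset with a cluster-scoped VolumeAttributesClass;
	// the FakeVolumeAttributesClasses implementation added in this diff serves it back.
	cs := fake.NewSimpleClientset(&storagev1beta1.VolumeAttributesClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gold"}, // placeholder name
		DriverName: "ebs.csi.aws.com",               // placeholder driver
	})

	// The same call shape works against a real clientset built with kubernetes.NewForConfig;
	// only the construction of cs differs between tests and production code.
	vac, err := cs.StorageV1beta1().VolumeAttributesClasses().Get(context.Background(), "gold", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(vac.Name, vac.DriverName)
}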
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index 9b4ef231c8..fed699cc85 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageClassesGetter has a method to return a StorageClassInterface. @@ -55,143 +52,18 @@ type StorageClassInterface interface { // storageClasses implements StorageClassInterface type storageClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration] } // newStorageClasses returns a StorageClasses func newStorageClasses(c *StorageV1beta1Client) *storageClasses { return &storageClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration]( + "storageclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.StorageClass { return &v1beta1.StorageClass{} }, + func() *v1beta1.StorageClassList { return &v1beta1.StorageClassList{} }), } } - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Post(). 
- Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *storageClasses) Apply(ctx context.Context, storageClass *storagev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - result = &v1beta1.StorageClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index 35a8b64fcc..01024ce485 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface { type VolumeAttachmentInterface interface { Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (*v1beta1.VolumeAttachment, error) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.VolumeAttachment { return &v1beta1.VolumeAttachment{} }, + func() *v1beta1.VolumeAttachmentList { return &v1beta1.VolumeAttachmentList{} }), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. 
-func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 0000000000..47eadcac63 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface. +// A group's client should implement this interface. +type VolumeAttributesClassesGetter interface { + VolumeAttributesClasses() VolumeAttributesClassInterface +} + +// VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources. +type VolumeAttributesClassInterface interface { + Create(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.CreateOptions) (*v1beta1.VolumeAttributesClass, error) + Update(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1beta1.VolumeAttributesClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttributesClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttributesClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttributesClass, err error) + Apply(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttributesClass, err error) + VolumeAttributesClassExpansion +} + +// volumeAttributesClasses implements VolumeAttributesClassInterface +type volumeAttributesClasses struct { + *gentype.ClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration] +} + +// newVolumeAttributesClasses returns a VolumeAttributesClasses +func newVolumeAttributesClasses(c *StorageV1beta1Client) *volumeAttributesClasses { + return &volumeAttributesClasses{ + gentype.NewClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration]( + "volumeattributesclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.VolumeAttributesClass { return &v1beta1.VolumeAttributesClass{} }, + func() *v1beta1.VolumeAttributesClassList { return &v1beta1.VolumeAttributesClassList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go index 9b5da88c72..c3ff235912 100644 --- 
a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go @@ -43,20 +43,22 @@ var storageversionmigrationsKind = v1alpha1.SchemeGroupVersion.WithKind("Storage // Get takes name of the storageVersionMigration, and returns the corresponding storageVersionMigration object, and an error if there is any. func (c *FakeStorageVersionMigrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersionMigration, err error) { + emptyResult := &v1alpha1.StorageVersionMigration{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageversionmigrationsResource, name), &v1alpha1.StorageVersionMigration{}) + Invokes(testing.NewRootGetActionWithOptions(storageversionmigrationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersionMigration), err } // List takes label and field selectors, and returns the list of StorageVersionMigrations that match those selectors. func (c *FakeStorageVersionMigrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionMigrationList, err error) { + emptyResult := &v1alpha1.StorageVersionMigrationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(storageversionmigrationsResource, storageversionmigrationsKind, opts), &v1alpha1.StorageVersionMigrationList{}) + Invokes(testing.NewRootListActionWithOptions(storageversionmigrationsResource, storageversionmigrationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeStorageVersionMigrations) List(ctx context.Context, opts v1.ListOpt // Watch returns a watch.Interface that watches the requested storageVersionMigrations. func (c *FakeStorageVersionMigrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(storageversionmigrationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(storageversionmigrationsResource, opts)) } // Create takes the representation of a storageVersionMigration and creates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. func (c *FakeStorageVersionMigrations) Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + emptyResult := &v1alpha1.StorageVersionMigration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageversionmigrationsResource, storageVersionMigration), &v1alpha1.StorageVersionMigration{}) + Invokes(testing.NewRootCreateActionWithOptions(storageversionmigrationsResource, storageVersionMigration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersionMigration), err } // Update takes the representation of a storageVersionMigration and updates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. func (c *FakeStorageVersionMigrations) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + emptyResult := &v1alpha1.StorageVersionMigration{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(storageversionmigrationsResource, storageVersionMigration), &v1alpha1.StorageVersionMigration{}) + Invokes(testing.NewRootUpdateActionWithOptions(storageversionmigrationsResource, storageVersionMigration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersionMigration), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStorageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) { +func (c *FakeStorageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + emptyResult := &v1alpha1.StorageVersionMigration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(storageversionmigrationsResource, "status", storageVersionMigration), &v1alpha1.StorageVersionMigration{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(storageversionmigrationsResource, "status", storageVersionMigration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersionMigration), err } @@ -118,7 +123,7 @@ func (c *FakeStorageVersionMigrations) Delete(ctx context.Context, name string, // DeleteCollection deletes a collection of objects. func (c *FakeStorageVersionMigrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageversionmigrationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(storageversionmigrationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionMigrationList{}) return err @@ -126,10 +131,11 @@ func (c *FakeStorageVersionMigrations) DeleteCollection(ctx context.Context, opt // Patch applies the patch and returns the patched storageVersionMigration. func (c *FakeStorageVersionMigrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) { + emptyResult := &v1alpha1.StorageVersionMigration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageversionmigrationsResource, name, pt, data, subresources...), &v1alpha1.StorageVersionMigration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersionMigration), err } @@ -147,10 +153,11 @@ func (c *FakeStorageVersionMigrations) Apply(ctx context.Context, storageVersion if name == nil { return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") } + emptyResult := &v1alpha1.StorageVersionMigration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(storageversionmigrationsResource, *name, types.ApplyPatchType, data), &v1alpha1.StorageVersionMigration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersionMigration), err } @@ -169,10 +176,11 @@ func (c *FakeStorageVersionMigrations) ApplyStatus(ctx context.Context, storageV if name == nil { return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") } + emptyResult := &v1alpha1.StorageVersionMigration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageversionmigrationsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.StorageVersionMigration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersionMigration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go index be66a5b946..5fc0fd5197 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/storagemigration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageVersionMigrationsGetter has a method to return a StorageVersionMigrationInterface. @@ -43,6 +40,7 @@ type StorageVersionMigrationsGetter interface { type StorageVersionMigrationInterface interface { Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*v1alpha1.StorageVersionMigration, error) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type StorageVersionMigrationInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) StorageVersionMigrationExpansion } // storageVersionMigrations implements StorageVersionMigrationInterface type storageVersionMigrations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration] } // newStorageVersionMigrations returns a StorageVersionMigrations func newStorageVersionMigrations(c *StoragemigrationV1alpha1Client) *storageVersionMigrations { return &storageVersionMigrations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]( + "storageversionmigrations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.StorageVersionMigration { return &v1alpha1.StorageVersionMigration{} }, + func() *v1alpha1.StorageVersionMigrationList { return &v1alpha1.StorageVersionMigrationList{} }), } } - -// Get takes name of the storageVersionMigration, and returns the corresponding storageVersionMigration object, and an error if there is any. -func (c *storageVersionMigrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersionMigration, err error) { - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Get(). - Resource("storageversionmigrations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageVersionMigrations that match those selectors. -func (c *storageVersionMigrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionMigrationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.StorageVersionMigrationList{} - err = c.client.Get(). - Resource("storageversionmigrations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageVersionMigrations. 
-func (c *storageVersionMigrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageversionmigrations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageVersionMigration and creates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. -func (c *storageVersionMigrations) Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (result *v1alpha1.StorageVersionMigration, err error) { - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Post(). - Resource("storageversionmigrations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersionMigration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageVersionMigration and updates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. -func (c *storageVersionMigrations) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Put(). - Resource("storageversionmigrations"). - Name(storageVersionMigration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersionMigration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *storageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Put(). - Resource("storageversionmigrations"). - Name(storageVersionMigration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersionMigration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageVersionMigration and deletes it. Returns an error if one occurs. -func (c *storageVersionMigrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageversionmigrations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageVersionMigrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageversionmigrations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageVersionMigration. -func (c *storageVersionMigrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) { - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Patch(pt). 
- Resource("storageversionmigrations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersionMigration. -func (c *storageVersionMigrations) Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) { - if storageVersionMigration == nil { - return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersionMigration) - if err != nil { - return nil, err - } - name := storageVersionMigration.Name - if name == nil { - return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") - } - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversionmigrations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *storageVersionMigrations) ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) { - if storageVersionMigration == nil { - return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersionMigration) - if err != nil { - return nil, err - } - - name := storageVersionMigration.Name - if name == nil { - return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") - } - - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversionmigrations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go index fe9e27985d..4ab267e42e 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type MutatingWebhookConfigurationLister interface { // mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. type mutatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.MutatingWebhookConfiguration] } // NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { - return &mutatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all MutatingWebhookConfigurations in the indexer. 
-func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.MutatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the MutatingWebhookConfiguration from the index for a given name. -func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1.MutatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("mutatingwebhookconfiguration"), name) - } - return obj.(*v1.MutatingWebhookConfiguration), nil + return &mutatingWebhookConfigurationLister{listers.New[*v1.MutatingWebhookConfiguration](indexer, v1.Resource("mutatingwebhookconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go index fff072f4c2..f233cdbe80 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingAdmissionPolicyLister interface { // validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. type validatingAdmissionPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ValidatingAdmissionPolicy] } // NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { - return &validatingAdmissionPolicyLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicies in the indexer. -func (s *validatingAdmissionPolicyLister) List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ValidatingAdmissionPolicy)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicy from the index for a given name. 
-func (s *validatingAdmissionPolicyLister) Get(name string) (*v1.ValidatingAdmissionPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("validatingadmissionpolicy"), name) - } - return obj.(*v1.ValidatingAdmissionPolicy), nil + return &validatingAdmissionPolicyLister{listers.New[*v1.ValidatingAdmissionPolicy](indexer, v1.Resource("validatingadmissionpolicy"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go index 07856981e5..450a066725 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingAdmissionPolicyBindingLister interface { // validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. type validatingAdmissionPolicyBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ValidatingAdmissionPolicyBinding] } // NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { - return &validatingAdmissionPolicyBindingLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicyBindings in the indexer. -func (s *validatingAdmissionPolicyBindingLister) List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicyBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ValidatingAdmissionPolicyBinding)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. -func (s *validatingAdmissionPolicyBindingLister) Get(name string) (*v1.ValidatingAdmissionPolicyBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("validatingadmissionpolicybinding"), name) - } - return obj.(*v1.ValidatingAdmissionPolicyBinding), nil + return &validatingAdmissionPolicyBindingLister{listers.New[*v1.ValidatingAdmissionPolicyBinding](indexer, v1.Resource("validatingadmissionpolicybinding"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go index 1579a0ebb7..99045a6752 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingWebhookConfigurationLister interface { // validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. 
type validatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ValidatingWebhookConfiguration] } // NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { - return &validatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all ValidatingWebhookConfigurations in the indexer. -func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ValidatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. -func (s *validatingWebhookConfigurationLister) Get(name string) (*v1.ValidatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("validatingwebhookconfiguration"), name) - } - return obj.(*v1.ValidatingWebhookConfiguration), nil + return &validatingWebhookConfigurationLister{listers.New[*v1.ValidatingWebhookConfiguration](indexer, v1.Resource("validatingwebhookconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go index ae500183a7..c3aec2d736 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingAdmissionPolicyLister interface { // validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. type validatingAdmissionPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ValidatingAdmissionPolicy] } // NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { - return &validatingAdmissionPolicyLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicies in the indexer. -func (s *validatingAdmissionPolicyLister) List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ValidatingAdmissionPolicy)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicy from the index for a given name. 
-func (s *validatingAdmissionPolicyLister) Get(name string) (*v1alpha1.ValidatingAdmissionPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("validatingadmissionpolicy"), name) - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), nil + return &validatingAdmissionPolicyLister{listers.New[*v1alpha1.ValidatingAdmissionPolicy](indexer, v1alpha1.Resource("validatingadmissionpolicy"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index 552854daf2..5a2cf79c53 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingAdmissionPolicyBindingLister interface { // validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. type validatingAdmissionPolicyBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ValidatingAdmissionPolicyBinding] } // NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { - return &validatingAdmissionPolicyBindingLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicyBindings in the indexer. -func (s *validatingAdmissionPolicyBindingLister) List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ValidatingAdmissionPolicyBinding)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. 
-func (s *validatingAdmissionPolicyBindingLister) Get(name string) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("validatingadmissionpolicybinding"), name) - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), nil + return &validatingAdmissionPolicyBindingLister{listers.New[*v1alpha1.ValidatingAdmissionPolicyBinding](indexer, v1alpha1.Resource("validatingadmissionpolicybinding"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 93c6096ee9..3bad49ac0a 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type MutatingWebhookConfigurationLister interface { // mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. type mutatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.MutatingWebhookConfiguration] } // NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { - return &mutatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all MutatingWebhookConfigurations in the indexer. -func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.MutatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the MutatingWebhookConfiguration from the index for a given name. 
-func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("mutatingwebhookconfiguration"), name) - } - return obj.(*v1beta1.MutatingWebhookConfiguration), nil + return &mutatingWebhookConfigurationLister{listers.New[*v1beta1.MutatingWebhookConfiguration](indexer, v1beta1.Resource("mutatingwebhookconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go index 7018b3ceec..74d7c6ce3e 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingAdmissionPolicyLister interface { // validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. type validatingAdmissionPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ValidatingAdmissionPolicy] } // NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { - return &validatingAdmissionPolicyLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicies in the indexer. -func (s *validatingAdmissionPolicyLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ValidatingAdmissionPolicy)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicy from the index for a given name. -func (s *validatingAdmissionPolicyLister) Get(name string) (*v1beta1.ValidatingAdmissionPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("validatingadmissionpolicy"), name) - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), nil + return &validatingAdmissionPolicyLister{listers.New[*v1beta1.ValidatingAdmissionPolicy](indexer, v1beta1.Resource("validatingadmissionpolicy"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index 5fcebfd22f..668d652bb7 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingAdmissionPolicyBindingLister interface { // validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. 
type validatingAdmissionPolicyBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ValidatingAdmissionPolicyBinding] } // NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { - return &validatingAdmissionPolicyBindingLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicyBindings in the indexer. -func (s *validatingAdmissionPolicyBindingLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicyBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ValidatingAdmissionPolicyBinding)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. -func (s *validatingAdmissionPolicyBindingLister) Get(name string) (*v1beta1.ValidatingAdmissionPolicyBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("validatingadmissionpolicybinding"), name) - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), nil + return &validatingAdmissionPolicyBindingLister{listers.New[*v1beta1.ValidatingAdmissionPolicyBinding](indexer, v1beta1.Resource("validatingadmissionpolicybinding"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 7c17fccb2e..16167d5738 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingWebhookConfigurationLister interface { // validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. type validatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ValidatingWebhookConfiguration] } // NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { - return &validatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all ValidatingWebhookConfigurations in the indexer. -func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ValidatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. 
-func (s *validatingWebhookConfigurationLister) Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("validatingwebhookconfiguration"), name) - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), nil + return &validatingWebhookConfigurationLister{listers.New[*v1beta1.ValidatingWebhookConfiguration](indexer, v1beta1.Resource("validatingwebhookconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go index 9a6d74b2bf..ce51b88f28 100644 --- a/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type StorageVersionLister interface { // storageVersionLister implements the StorageVersionLister interface. type storageVersionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.StorageVersion] } // NewStorageVersionLister returns a new StorageVersionLister. func NewStorageVersionLister(indexer cache.Indexer) StorageVersionLister { - return &storageVersionLister{indexer: indexer} -} - -// List lists all StorageVersions in the indexer. -func (s *storageVersionLister) List(selector labels.Selector) (ret []*v1alpha1.StorageVersion, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.StorageVersion)) - }) - return ret, err -} - -// Get retrieves the StorageVersion from the index for a given name. -func (s *storageVersionLister) Get(name string) (*v1alpha1.StorageVersion, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("storageversion"), name) - } - return obj.(*v1alpha1.StorageVersion), nil + return &storageVersionLister{listers.New[*v1alpha1.StorageVersion](indexer, v1alpha1.Resource("storageversion"))} } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go index 9e2f973746..b9061b159e 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{indexer: indexer} -} - -// List lists all ControllerRevisions in the indexer. 
-func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ControllerRevision)) - }) - return ret, err + return &controllerRevisionLister{listers.New[*v1.ControllerRevision](indexer, v1.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. @@ -74,26 +66,5 @@ type ControllerRevisionNamespaceLister interface { // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ControllerRevisions in the indexer for a given namespace. -func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ControllerRevision)) - }) - return ret, err -} - -// Get retrieves the ControllerRevision from the indexer for a given namespace and name. -func (s controllerRevisionNamespaceLister) Get(name string) (*v1.ControllerRevision, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("controllerrevision"), name) - } - return obj.(*v1.ControllerRevision), nil + listers.ResourceIndexer[*v1.ControllerRevision] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go index 061959e3da..4240cb6248 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{indexer: indexer} -} - -// List lists all DaemonSets in the indexer. -func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.DaemonSet)) - }) - return ret, err + return &daemonSetLister{listers.New[*v1.DaemonSet](indexer, v1.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return daemonSetNamespaceLister{listers.NewNamespaced[*v1.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. 
@@ -74,26 +66,5 @@ type DaemonSetNamespaceLister interface { // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. type daemonSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all DaemonSets in the indexer for a given namespace. -func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.DaemonSet)) - }) - return ret, err -} - -// Get retrieves the DaemonSet from the indexer for a given namespace and name. -func (s daemonSetNamespaceLister) Get(name string) (*v1.DaemonSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("daemonset"), name) - } - return obj.(*v1.DaemonSet), nil + listers.ResourceIndexer[*v1.DaemonSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go index 7704034172..3337026b73 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. -func (s *deploymentLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*v1.Deployment](indexer, v1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*v1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -74,26 +66,5 @@ type DeploymentNamespaceLister interface { // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. 
-func (s deploymentNamespaceLister) Get(name string) (*v1.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("deployment"), name) - } - return obj.(*v1.Deployment), nil + listers.ResourceIndexer[*v1.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go index 3ca7757eb9..244df1d33f 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{indexer: indexer} -} - -// List lists all ReplicaSets in the indexer. -func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicaSet)) - }) - return ret, err + return &replicaSetLister{listers.New[*v1.ReplicaSet](indexer, v1.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicaSetNamespaceLister{listers.NewNamespaced[*v1.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -74,26 +66,5 @@ type ReplicaSetNamespaceLister interface { // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. type replicaSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicaSets in the indexer for a given namespace. -func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicaSet)) - }) - return ret, err -} - -// Get retrieves the ReplicaSet from the indexer for a given namespace and name. 
-func (s replicaSetNamespaceLister) Get(name string) (*v1.ReplicaSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("replicaset"), name) - } - return obj.(*v1.ReplicaSet), nil + listers.ResourceIndexer[*v1.ReplicaSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go index f6899d5ff9..a8dc1b022a 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{indexer: indexer} -} - -// List lists all StatefulSets in the indexer. -func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.StatefulSet)) - }) - return ret, err + return &statefulSetLister{listers.New[*v1.StatefulSet](indexer, v1.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return statefulSetNamespaceLister{listers.NewNamespaced[*v1.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. @@ -74,26 +66,5 @@ type StatefulSetNamespaceLister interface { // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. type statefulSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all StatefulSets in the indexer for a given namespace. -func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.StatefulSet)) - }) - return ret, err -} - -// Get retrieves the StatefulSet from the indexer for a given namespace and name. 
-func (s statefulSetNamespaceLister) Get(name string) (*v1.StatefulSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("statefulset"), name) - } - return obj.(*v1.StatefulSet), nil + listers.ResourceIndexer[*v1.StatefulSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go index fc73de723f..c5e8fb3735 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{indexer: indexer} -} - -// List lists all ControllerRevisions in the indexer. -func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ControllerRevision)) - }) - return ret, err + return &controllerRevisionLister{listers.New[*v1beta1.ControllerRevision](indexer, v1beta1.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1beta1.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. @@ -74,26 +66,5 @@ type ControllerRevisionNamespaceLister interface { // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ControllerRevisions in the indexer for a given namespace. -func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ControllerRevision)) - }) - return ret, err -} - -// Get retrieves the ControllerRevision from the indexer for a given namespace and name. 
-func (s controllerRevisionNamespaceLister) Get(name string) (*v1beta1.ControllerRevision, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("controllerrevision"), name) - } - return obj.(*v1beta1.ControllerRevision), nil + listers.ResourceIndexer[*v1beta1.ControllerRevision] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go index 3fb70794ca..1bc6d45ad0 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. -func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*v1beta1.Deployment](indexer, v1beta1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*v1beta1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -74,26 +66,5 @@ type DeploymentNamespaceLister interface { // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. 
-func (s deploymentNamespaceLister) Get(name string) (*v1beta1.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("deployment"), name) - } - return obj.(*v1beta1.Deployment), nil + listers.ResourceIndexer[*v1beta1.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go index e3556bc398..4bf103aef7 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{indexer: indexer} -} - -// List lists all StatefulSets in the indexer. -func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.StatefulSet)) - }) - return ret, err + return &statefulSetLister{listers.New[*v1beta1.StatefulSet](indexer, v1beta1.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return statefulSetNamespaceLister{listers.NewNamespaced[*v1beta1.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. @@ -74,26 +66,5 @@ type StatefulSetNamespaceLister interface { // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. type statefulSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all StatefulSets in the indexer for a given namespace. -func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.StatefulSet)) - }) - return ret, err -} - -// Get retrieves the StatefulSet from the indexer for a given namespace and name. 
-func (s statefulSetNamespaceLister) Get(name string) (*v1beta1.StatefulSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("statefulset"), name) - } - return obj.(*v1beta1.StatefulSet), nil + listers.ResourceIndexer[*v1beta1.StatefulSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go index da2ce86005..de941bc691 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{indexer: indexer} -} - -// List lists all ControllerRevisions in the indexer. -func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ControllerRevision)) - }) - return ret, err + return &controllerRevisionLister{listers.New[*v1beta2.ControllerRevision](indexer, v1beta2.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1beta2.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. @@ -74,26 +66,5 @@ type ControllerRevisionNamespaceLister interface { // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ControllerRevisions in the indexer for a given namespace. -func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ControllerRevision)) - }) - return ret, err -} - -// Get retrieves the ControllerRevision from the indexer for a given namespace and name. 
-func (s controllerRevisionNamespaceLister) Get(name string) (*v1beta2.ControllerRevision, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("controllerrevision"), name) - } - return obj.(*v1beta2.ControllerRevision), nil + listers.ResourceIndexer[*v1beta2.ControllerRevision] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go index 4b7aedd758..37784fe888 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{indexer: indexer} -} - -// List lists all DaemonSets in the indexer. -func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.DaemonSet)) - }) - return ret, err + return &daemonSetLister{listers.New[*v1beta2.DaemonSet](indexer, v1beta2.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return daemonSetNamespaceLister{listers.NewNamespaced[*v1beta2.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. @@ -74,26 +66,5 @@ type DaemonSetNamespaceLister interface { // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. type daemonSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all DaemonSets in the indexer for a given namespace. -func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.DaemonSet)) - }) - return ret, err -} - -// Get retrieves the DaemonSet from the indexer for a given namespace and name. 
-func (s daemonSetNamespaceLister) Get(name string) (*v1beta2.DaemonSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("daemonset"), name) - } - return obj.(*v1beta2.DaemonSet), nil + listers.ResourceIndexer[*v1beta2.DaemonSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go index c2857bbc36..75acc1693e 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. -func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*v1beta2.Deployment](indexer, v1beta2.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*v1beta2.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -74,26 +66,5 @@ type DeploymentNamespaceLister interface { // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. 
-func (s deploymentNamespaceLister) Get(name string) (*v1beta2.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("deployment"), name) - } - return obj.(*v1beta2.Deployment), nil + listers.ResourceIndexer[*v1beta2.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go index 26b350ce8f..37ea97630e 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{indexer: indexer} -} - -// List lists all ReplicaSets in the indexer. -func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ReplicaSet)) - }) - return ret, err + return &replicaSetLister{listers.New[*v1beta2.ReplicaSet](indexer, v1beta2.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicaSetNamespaceLister{listers.NewNamespaced[*v1beta2.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -74,26 +66,5 @@ type ReplicaSetNamespaceLister interface { // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. type replicaSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicaSets in the indexer for a given namespace. -func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ReplicaSet)) - }) - return ret, err -} - -// Get retrieves the ReplicaSet from the indexer for a given namespace and name. 
-func (s replicaSetNamespaceLister) Get(name string) (*v1beta2.ReplicaSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("replicaset"), name) - } - return obj.(*v1beta2.ReplicaSet), nil + listers.ResourceIndexer[*v1beta2.ReplicaSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go index fbbaf0133f..cc48a1473c 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{indexer: indexer} -} - -// List lists all StatefulSets in the indexer. -func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.StatefulSet)) - }) - return ret, err + return &statefulSetLister{listers.New[*v1beta2.StatefulSet](indexer, v1beta2.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return statefulSetNamespaceLister{listers.NewNamespaced[*v1beta2.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. @@ -74,26 +66,5 @@ type StatefulSetNamespaceLister interface { // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. type statefulSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all StatefulSets in the indexer for a given namespace. -func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.StatefulSet)) - }) - return ret, err -} - -// Get retrieves the StatefulSet from the indexer for a given namespace and name. 
-func (s statefulSetNamespaceLister) Get(name string) (*v1beta2.StatefulSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("statefulset"), name) - } - return obj.(*v1beta2.StatefulSet), nil + listers.ResourceIndexer[*v1beta2.StatefulSet] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go index 8447f059d4..2cd4cc87bf 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/autoscaling/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. -func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*v1.HorizontalPodAutoscaler](indexer, v1.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -74,26 +66,5 @@ type HorizontalPodAutoscalerNamespaceLister interface { // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. 
-func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v1.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v1.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*v1.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go index a5cef27725..7c2806af21 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go @@ -20,8 +20,8 @@ package v2 import ( v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v2.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. -func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*v2.HorizontalPodAutoscaler](indexer, v2.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -74,26 +66,5 @@ type HorizontalPodAutoscalerNamespaceLister interface { // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. 
-func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v2.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*v2.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go index f1804e995b..a2befd6062 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -20,8 +20,8 @@ package v2beta1 import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v2beta1.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. -func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta1.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*v2beta1.HorizontalPodAutoscaler](indexer, v2beta1.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2beta1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -74,26 +66,5 @@ type HorizontalPodAutoscalerNamespaceLister interface { // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta1.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. 
-func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2beta1.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2beta1.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v2beta1.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*v2beta1.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go index b0dbaf9eb0..52bae849ba 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -20,8 +20,8 @@ package v2beta2 import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v2beta2.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. -func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta2.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*v2beta2.HorizontalPodAutoscaler](indexer, v2beta2.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2beta2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -74,26 +66,5 @@ type HorizontalPodAutoscalerNamespaceLister interface { // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta2.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. 
-func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2beta2.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2beta2.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v2beta2.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*v2beta2.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go index 8e49ed959f..a7a3abbfa3 100644 --- a/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type CronJobLister interface { // cronJobLister implements the CronJobLister interface. type cronJobLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.CronJob] } // NewCronJobLister returns a new CronJobLister. func NewCronJobLister(indexer cache.Indexer) CronJobLister { - return &cronJobLister{indexer: indexer} -} - -// List lists all CronJobs in the indexer. -func (s *cronJobLister) List(selector labels.Selector) (ret []*v1.CronJob, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CronJob)) - }) - return ret, err + return &cronJobLister{listers.New[*v1.CronJob](indexer, v1.Resource("cronjob"))} } // CronJobs returns an object that can list and get CronJobs. func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { - return cronJobNamespaceLister{indexer: s.indexer, namespace: namespace} + return cronJobNamespaceLister{listers.NewNamespaced[*v1.CronJob](s.ResourceIndexer, namespace)} } // CronJobNamespaceLister helps list and get CronJobs. @@ -74,26 +66,5 @@ type CronJobNamespaceLister interface { // cronJobNamespaceLister implements the CronJobNamespaceLister // interface. type cronJobNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CronJobs in the indexer for a given namespace. -func (s cronJobNamespaceLister) List(selector labels.Selector) (ret []*v1.CronJob, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CronJob)) - }) - return ret, err -} - -// Get retrieves the CronJob from the indexer for a given namespace and name. -func (s cronJobNamespaceLister) Get(name string) (*v1.CronJob, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cronjob"), name) - } - return obj.(*v1.CronJob), nil + listers.ResourceIndexer[*v1.CronJob] } diff --git a/vendor/k8s.io/client-go/listers/batch/v1/job.go b/vendor/k8s.io/client-go/listers/batch/v1/job.go index 3aba6b95fa..4078a9f7d8 100644 --- a/vendor/k8s.io/client-go/listers/batch/v1/job.go +++ b/vendor/k8s.io/client-go/listers/batch/v1/job.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type JobLister interface { // jobLister implements the JobLister interface. 
type jobLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Job] } // NewJobLister returns a new JobLister. func NewJobLister(indexer cache.Indexer) JobLister { - return &jobLister{indexer: indexer} -} - -// List lists all Jobs in the indexer. -func (s *jobLister) List(selector labels.Selector) (ret []*v1.Job, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Job)) - }) - return ret, err + return &jobLister{listers.New[*v1.Job](indexer, v1.Resource("job"))} } // Jobs returns an object that can list and get Jobs. func (s *jobLister) Jobs(namespace string) JobNamespaceLister { - return jobNamespaceLister{indexer: s.indexer, namespace: namespace} + return jobNamespaceLister{listers.NewNamespaced[*v1.Job](s.ResourceIndexer, namespace)} } // JobNamespaceLister helps list and get Jobs. @@ -74,26 +66,5 @@ type JobNamespaceLister interface { // jobNamespaceLister implements the JobNamespaceLister // interface. type jobNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Jobs in the indexer for a given namespace. -func (s jobNamespaceLister) List(selector labels.Selector) (ret []*v1.Job, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Job)) - }) - return ret, err -} - -// Get retrieves the Job from the indexer for a given namespace and name. -func (s jobNamespaceLister) Get(name string) (*v1.Job, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("job"), name) - } - return obj.(*v1.Job), nil + listers.ResourceIndexer[*v1.Job] } diff --git a/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go index 4842d5e5a1..33ed8219e3 100644 --- a/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/batch/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type CronJobLister interface { // cronJobLister implements the CronJobLister interface. type cronJobLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.CronJob] } // NewCronJobLister returns a new CronJobLister. func NewCronJobLister(indexer cache.Indexer) CronJobLister { - return &cronJobLister{indexer: indexer} -} - -// List lists all CronJobs in the indexer. -func (s *cronJobLister) List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CronJob)) - }) - return ret, err + return &cronJobLister{listers.New[*v1beta1.CronJob](indexer, v1beta1.Resource("cronjob"))} } // CronJobs returns an object that can list and get CronJobs. func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { - return cronJobNamespaceLister{indexer: s.indexer, namespace: namespace} + return cronJobNamespaceLister{listers.NewNamespaced[*v1beta1.CronJob](s.ResourceIndexer, namespace)} } // CronJobNamespaceLister helps list and get CronJobs. @@ -74,26 +66,5 @@ type CronJobNamespaceLister interface { // cronJobNamespaceLister implements the CronJobNamespaceLister // interface. 
type cronJobNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CronJobs in the indexer for a given namespace. -func (s cronJobNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CronJob)) - }) - return ret, err -} - -// Get retrieves the CronJob from the indexer for a given namespace and name. -func (s cronJobNamespaceLister) Get(name string) (*v1beta1.CronJob, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("cronjob"), name) - } - return obj.(*v1beta1.CronJob), nil + listers.ResourceIndexer[*v1beta1.CronJob] } diff --git a/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go index 0d04e118db..38e4a3a658 100644 --- a/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/certificates/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CertificateSigningRequestLister interface { // certificateSigningRequestLister implements the CertificateSigningRequestLister interface. type certificateSigningRequestLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.CertificateSigningRequest] } // NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister. func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister { - return &certificateSigningRequestLister{indexer: indexer} -} - -// List lists all CertificateSigningRequests in the indexer. -func (s *certificateSigningRequestLister) List(selector labels.Selector) (ret []*v1.CertificateSigningRequest, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CertificateSigningRequest)) - }) - return ret, err -} - -// Get retrieves the CertificateSigningRequest from the index for a given name. 
-func (s *certificateSigningRequestLister) Get(name string) (*v1.CertificateSigningRequest, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("certificatesigningrequest"), name) - } - return obj.(*v1.CertificateSigningRequest), nil + return &certificateSigningRequestLister{listers.New[*v1.CertificateSigningRequest](indexer, v1.Resource("certificatesigningrequest"))} } diff --git a/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go index b8049a7618..88e5365f40 100644 --- a/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go +++ b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/certificates/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterTrustBundleLister interface { // clusterTrustBundleLister implements the ClusterTrustBundleLister interface. type clusterTrustBundleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ClusterTrustBundle] } // NewClusterTrustBundleLister returns a new ClusterTrustBundleLister. func NewClusterTrustBundleLister(indexer cache.Indexer) ClusterTrustBundleLister { - return &clusterTrustBundleLister{indexer: indexer} -} - -// List lists all ClusterTrustBundles in the indexer. -func (s *clusterTrustBundleLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterTrustBundle, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterTrustBundle)) - }) - return ret, err -} - -// Get retrieves the ClusterTrustBundle from the index for a given name. -func (s *clusterTrustBundleLister) Get(name string) (*v1alpha1.ClusterTrustBundle, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clustertrustbundle"), name) - } - return obj.(*v1alpha1.ClusterTrustBundle), nil + return &clusterTrustBundleLister{listers.New[*v1alpha1.ClusterTrustBundle](indexer, v1alpha1.Resource("clustertrustbundle"))} } diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go index 471b5629b3..84b5ac4a90 100644 --- a/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/certificates/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CertificateSigningRequestLister interface { // certificateSigningRequestLister implements the CertificateSigningRequestLister interface. type certificateSigningRequestLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.CertificateSigningRequest] } // NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister. 
func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister { - return &certificateSigningRequestLister{indexer: indexer} -} - -// List lists all CertificateSigningRequests in the indexer. -func (s *certificateSigningRequestLister) List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequest, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CertificateSigningRequest)) - }) - return ret, err -} - -// Get retrieves the CertificateSigningRequest from the index for a given name. -func (s *certificateSigningRequestLister) Get(name string) (*v1beta1.CertificateSigningRequest, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("certificatesigningrequest"), name) - } - return obj.(*v1beta1.CertificateSigningRequest), nil + return &certificateSigningRequestLister{listers.New[*v1beta1.CertificateSigningRequest](indexer, v1beta1.Resource("certificatesigningrequest"))} } diff --git a/vendor/k8s.io/client-go/listers/coordination/v1/lease.go b/vendor/k8s.io/client-go/listers/coordination/v1/lease.go index de366d0e11..b36d8800e3 100644 --- a/vendor/k8s.io/client-go/listers/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/listers/coordination/v1/lease.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/coordination/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type LeaseLister interface { // leaseLister implements the LeaseLister interface. type leaseLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Lease] } // NewLeaseLister returns a new LeaseLister. func NewLeaseLister(indexer cache.Indexer) LeaseLister { - return &leaseLister{indexer: indexer} -} - -// List lists all Leases in the indexer. -func (s *leaseLister) List(selector labels.Selector) (ret []*v1.Lease, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Lease)) - }) - return ret, err + return &leaseLister{listers.New[*v1.Lease](indexer, v1.Resource("lease"))} } // Leases returns an object that can list and get Leases. func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { - return leaseNamespaceLister{indexer: s.indexer, namespace: namespace} + return leaseNamespaceLister{listers.NewNamespaced[*v1.Lease](s.ResourceIndexer, namespace)} } // LeaseNamespaceLister helps list and get Leases. @@ -74,26 +66,5 @@ type LeaseNamespaceLister interface { // leaseNamespaceLister implements the LeaseNamespaceLister // interface. type leaseNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Leases in the indexer for a given namespace. -func (s leaseNamespaceLister) List(selector labels.Selector) (ret []*v1.Lease, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Lease)) - }) - return ret, err -} - -// Get retrieves the Lease from the indexer for a given namespace and name. 
-func (s leaseNamespaceLister) Get(name string) (*v1.Lease, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("lease"), name) - } - return obj.(*v1.Lease), nil + listers.ResourceIndexer[*v1.Lease] } diff --git a/vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000..233bda975b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// LeaseCandidateListerExpansion allows custom methods to be added to +// LeaseCandidateLister. +type LeaseCandidateListerExpansion interface{} + +// LeaseCandidateNamespaceListerExpansion allows custom methods to be added to +// LeaseCandidateNamespaceLister. +type LeaseCandidateNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go new file mode 100644 index 0000000000..b5e5fac9e4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/coordination/v1alpha1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// LeaseCandidateLister helps list LeaseCandidates. +// All objects returned here must be treated as read-only. +type LeaseCandidateLister interface { + // List lists all LeaseCandidates in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.LeaseCandidate, err error) + // LeaseCandidates returns an object that can list and get LeaseCandidates. + LeaseCandidates(namespace string) LeaseCandidateNamespaceLister + LeaseCandidateListerExpansion +} + +// leaseCandidateLister implements the LeaseCandidateLister interface. +type leaseCandidateLister struct { + listers.ResourceIndexer[*v1alpha1.LeaseCandidate] +} + +// NewLeaseCandidateLister returns a new LeaseCandidateLister. 
+func NewLeaseCandidateLister(indexer cache.Indexer) LeaseCandidateLister { + return &leaseCandidateLister{listers.New[*v1alpha1.LeaseCandidate](indexer, v1alpha1.Resource("leasecandidate"))} +} + +// LeaseCandidates returns an object that can list and get LeaseCandidates. +func (s *leaseCandidateLister) LeaseCandidates(namespace string) LeaseCandidateNamespaceLister { + return leaseCandidateNamespaceLister{listers.NewNamespaced[*v1alpha1.LeaseCandidate](s.ResourceIndexer, namespace)} +} + +// LeaseCandidateNamespaceLister helps list and get LeaseCandidates. +// All objects returned here must be treated as read-only. +type LeaseCandidateNamespaceLister interface { + // List lists all LeaseCandidates in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.LeaseCandidate, err error) + // Get retrieves the LeaseCandidate from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.LeaseCandidate, error) + LeaseCandidateNamespaceListerExpansion +} + +// leaseCandidateNamespaceLister implements the LeaseCandidateNamespaceLister +// interface. +type leaseCandidateNamespaceLister struct { + listers.ResourceIndexer[*v1alpha1.LeaseCandidate] +} diff --git a/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go index 8dfdc1e9bc..dbe132696a 100644 --- a/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/coordination/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type LeaseLister interface { // leaseLister implements the LeaseLister interface. type leaseLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Lease] } // NewLeaseLister returns a new LeaseLister. func NewLeaseLister(indexer cache.Indexer) LeaseLister { - return &leaseLister{indexer: indexer} -} - -// List lists all Leases in the indexer. -func (s *leaseLister) List(selector labels.Selector) (ret []*v1beta1.Lease, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Lease)) - }) - return ret, err + return &leaseLister{listers.New[*v1beta1.Lease](indexer, v1beta1.Resource("lease"))} } // Leases returns an object that can list and get Leases. func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { - return leaseNamespaceLister{indexer: s.indexer, namespace: namespace} + return leaseNamespaceLister{listers.NewNamespaced[*v1beta1.Lease](s.ResourceIndexer, namespace)} } // LeaseNamespaceLister helps list and get Leases. @@ -74,26 +66,5 @@ type LeaseNamespaceLister interface { // leaseNamespaceLister implements the LeaseNamespaceLister // interface. type leaseNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Leases in the indexer for a given namespace. -func (s leaseNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Lease, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Lease)) - }) - return ret, err -} - -// Get retrieves the Lease from the indexer for a given namespace and name. 
-func (s leaseNamespaceLister) Get(name string) (*v1beta1.Lease, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("lease"), name) - } - return obj.(*v1beta1.Lease), nil + listers.ResourceIndexer[*v1beta1.Lease] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go index 5fcdac3c76..9e3274b5a3 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ComponentStatusLister interface { // componentStatusLister implements the ComponentStatusLister interface. type componentStatusLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ComponentStatus] } // NewComponentStatusLister returns a new ComponentStatusLister. func NewComponentStatusLister(indexer cache.Indexer) ComponentStatusLister { - return &componentStatusLister{indexer: indexer} -} - -// List lists all ComponentStatuses in the indexer. -func (s *componentStatusLister) List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ComponentStatus)) - }) - return ret, err -} - -// Get retrieves the ComponentStatus from the index for a given name. -func (s *componentStatusLister) Get(name string) (*v1.ComponentStatus, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("componentstatus"), name) - } - return obj.(*v1.ComponentStatus), nil + return &componentStatusLister{listers.New[*v1.ComponentStatus](indexer, v1.Resource("componentstatus"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/configmap.go b/vendor/k8s.io/client-go/listers/core/v1/configmap.go index 6a410e47c4..0dde404f2f 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/listers/core/v1/configmap.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ConfigMapLister interface { // configMapLister implements the ConfigMapLister interface. type configMapLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ConfigMap] } // NewConfigMapLister returns a new ConfigMapLister. func NewConfigMapLister(indexer cache.Indexer) ConfigMapLister { - return &configMapLister{indexer: indexer} -} - -// List lists all ConfigMaps in the indexer. -func (s *configMapLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ConfigMap)) - }) - return ret, err + return &configMapLister{listers.New[*v1.ConfigMap](indexer, v1.Resource("configmap"))} } // ConfigMaps returns an object that can list and get ConfigMaps. 
func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister { - return configMapNamespaceLister{indexer: s.indexer, namespace: namespace} + return configMapNamespaceLister{listers.NewNamespaced[*v1.ConfigMap](s.ResourceIndexer, namespace)} } // ConfigMapNamespaceLister helps list and get ConfigMaps. @@ -74,26 +66,5 @@ type ConfigMapNamespaceLister interface { // configMapNamespaceLister implements the ConfigMapNamespaceLister // interface. type configMapNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ConfigMaps in the indexer for a given namespace. -func (s configMapNamespaceLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ConfigMap)) - }) - return ret, err -} - -// Get retrieves the ConfigMap from the indexer for a given namespace and name. -func (s configMapNamespaceLister) Get(name string) (*v1.ConfigMap, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("configmap"), name) - } - return obj.(*v1.ConfigMap), nil + listers.ResourceIndexer[*v1.ConfigMap] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/endpoints.go b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go index 4759ce808f..726b432559 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EndpointsLister interface { // endpointsLister implements the EndpointsLister interface. type endpointsLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Endpoints] } // NewEndpointsLister returns a new EndpointsLister. func NewEndpointsLister(indexer cache.Indexer) EndpointsLister { - return &endpointsLister{indexer: indexer} -} - -// List lists all Endpoints in the indexer. -func (s *endpointsLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Endpoints)) - }) - return ret, err + return &endpointsLister{listers.New[*v1.Endpoints](indexer, v1.Resource("endpoints"))} } // Endpoints returns an object that can list and get Endpoints. func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister { - return endpointsNamespaceLister{indexer: s.indexer, namespace: namespace} + return endpointsNamespaceLister{listers.NewNamespaced[*v1.Endpoints](s.ResourceIndexer, namespace)} } // EndpointsNamespaceLister helps list and get Endpoints. @@ -74,26 +66,5 @@ type EndpointsNamespaceLister interface { // endpointsNamespaceLister implements the EndpointsNamespaceLister // interface. type endpointsNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Endpoints in the indexer for a given namespace. -func (s endpointsNamespaceLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Endpoints)) - }) - return ret, err -} - -// Get retrieves the Endpoints from the indexer for a given namespace and name. 
-func (s endpointsNamespaceLister) Get(name string) (*v1.Endpoints, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("endpoints"), name) - } - return obj.(*v1.Endpoints), nil + listers.ResourceIndexer[*v1.Endpoints] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/event.go b/vendor/k8s.io/client-go/listers/core/v1/event.go index 4416e20120..5ab3a19321 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/event.go +++ b/vendor/k8s.io/client-go/listers/core/v1/event.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. type eventLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{indexer: indexer} -} - -// List lists all Events in the indexer. -func (s *eventLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err + return &eventLister{listers.New[*v1.Event](indexer, v1.Resource("event"))} } // Events returns an object that can list and get Events. func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{indexer: s.indexer, namespace: namespace} + return eventNamespaceLister{listers.NewNamespaced[*v1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. @@ -74,26 +66,5 @@ type EventNamespaceLister interface { // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Events in the indexer for a given namespace. -func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err -} - -// Get retrieves the Event from the indexer for a given namespace and name. -func (s eventNamespaceLister) Get(name string) (*v1.Event, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("event"), name) - } - return obj.(*v1.Event), nil + listers.ResourceIndexer[*v1.Event] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/limitrange.go b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go index d8fa569cd3..5c7593cfa9 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type LimitRangeLister interface { // limitRangeLister implements the LimitRangeLister interface. type limitRangeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.LimitRange] } // NewLimitRangeLister returns a new LimitRangeLister. 
func NewLimitRangeLister(indexer cache.Indexer) LimitRangeLister { - return &limitRangeLister{indexer: indexer} -} - -// List lists all LimitRanges in the indexer. -func (s *limitRangeLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.LimitRange)) - }) - return ret, err + return &limitRangeLister{listers.New[*v1.LimitRange](indexer, v1.Resource("limitrange"))} } // LimitRanges returns an object that can list and get LimitRanges. func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceLister { - return limitRangeNamespaceLister{indexer: s.indexer, namespace: namespace} + return limitRangeNamespaceLister{listers.NewNamespaced[*v1.LimitRange](s.ResourceIndexer, namespace)} } // LimitRangeNamespaceLister helps list and get LimitRanges. @@ -74,26 +66,5 @@ type LimitRangeNamespaceLister interface { // limitRangeNamespaceLister implements the LimitRangeNamespaceLister // interface. type limitRangeNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all LimitRanges in the indexer for a given namespace. -func (s limitRangeNamespaceLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.LimitRange)) - }) - return ret, err -} - -// Get retrieves the LimitRange from the indexer for a given namespace and name. -func (s limitRangeNamespaceLister) Get(name string) (*v1.LimitRange, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("limitrange"), name) - } - return obj.(*v1.LimitRange), nil + listers.ResourceIndexer[*v1.LimitRange] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/namespace.go b/vendor/k8s.io/client-go/listers/core/v1/namespace.go index 454aa1a0a2..a016447cfe 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/listers/core/v1/namespace.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type NamespaceLister interface { // namespaceLister implements the NamespaceLister interface. type namespaceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Namespace] } // NewNamespaceLister returns a new NamespaceLister. func NewNamespaceLister(indexer cache.Indexer) NamespaceLister { - return &namespaceLister{indexer: indexer} -} - -// List lists all Namespaces in the indexer. -func (s *namespaceLister) List(selector labels.Selector) (ret []*v1.Namespace, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Namespace)) - }) - return ret, err -} - -// Get retrieves the Namespace from the index for a given name. 
-func (s *namespaceLister) Get(name string) (*v1.Namespace, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("namespace"), name) - } - return obj.(*v1.Namespace), nil + return &namespaceLister{listers.New[*v1.Namespace](indexer, v1.Resource("namespace"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/node.go b/vendor/k8s.io/client-go/listers/core/v1/node.go index 596049857f..495c6d79d1 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/node.go +++ b/vendor/k8s.io/client-go/listers/core/v1/node.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type NodeLister interface { // nodeLister implements the NodeLister interface. type nodeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Node] } // NewNodeLister returns a new NodeLister. func NewNodeLister(indexer cache.Indexer) NodeLister { - return &nodeLister{indexer: indexer} -} - -// List lists all Nodes in the indexer. -func (s *nodeLister) List(selector labels.Selector) (ret []*v1.Node, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Node)) - }) - return ret, err -} - -// Get retrieves the Node from the index for a given name. -func (s *nodeLister) Get(name string) (*v1.Node, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("node"), name) - } - return obj.(*v1.Node), nil + return &nodeLister{listers.New[*v1.Node](indexer, v1.Resource("node"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go index e7dfd4ac9f..17f19bb7a4 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PersistentVolumeLister interface { // persistentVolumeLister implements the PersistentVolumeLister interface. type persistentVolumeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PersistentVolume] } // NewPersistentVolumeLister returns a new PersistentVolumeLister. func NewPersistentVolumeLister(indexer cache.Indexer) PersistentVolumeLister { - return &persistentVolumeLister{indexer: indexer} -} - -// List lists all PersistentVolumes in the indexer. -func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PersistentVolume)) - }) - return ret, err -} - -// Get retrieves the PersistentVolume from the index for a given name. 
-func (s *persistentVolumeLister) Get(name string) (*v1.PersistentVolume, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("persistentvolume"), name) - } - return obj.(*v1.PersistentVolume), nil + return &persistentVolumeLister{listers.New[*v1.PersistentVolume](indexer, v1.Resource("persistentvolume"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go index fc71bb5a1f..ce9df90314 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type PersistentVolumeClaimLister interface { // persistentVolumeClaimLister implements the PersistentVolumeClaimLister interface. type persistentVolumeClaimLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PersistentVolumeClaim] } // NewPersistentVolumeClaimLister returns a new PersistentVolumeClaimLister. func NewPersistentVolumeClaimLister(indexer cache.Indexer) PersistentVolumeClaimLister { - return &persistentVolumeClaimLister{indexer: indexer} -} - -// List lists all PersistentVolumeClaims in the indexer. -func (s *persistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PersistentVolumeClaim)) - }) - return ret, err + return &persistentVolumeClaimLister{listers.New[*v1.PersistentVolumeClaim](indexer, v1.Resource("persistentvolumeclaim"))} } // PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister { - return persistentVolumeClaimNamespaceLister{indexer: s.indexer, namespace: namespace} + return persistentVolumeClaimNamespaceLister{listers.NewNamespaced[*v1.PersistentVolumeClaim](s.ResourceIndexer, namespace)} } // PersistentVolumeClaimNamespaceLister helps list and get PersistentVolumeClaims. @@ -74,26 +66,5 @@ type PersistentVolumeClaimNamespaceLister interface { // persistentVolumeClaimNamespaceLister implements the PersistentVolumeClaimNamespaceLister // interface. type persistentVolumeClaimNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PersistentVolumeClaims in the indexer for a given namespace. -func (s persistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PersistentVolumeClaim)) - }) - return ret, err -} - -// Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. 
-func (s persistentVolumeClaimNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("persistentvolumeclaim"), name) - } - return obj.(*v1.PersistentVolumeClaim), nil + listers.ResourceIndexer[*v1.PersistentVolumeClaim] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/pod.go b/vendor/k8s.io/client-go/listers/core/v1/pod.go index ab8f0946c3..b17a8382a0 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/pod.go +++ b/vendor/k8s.io/client-go/listers/core/v1/pod.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type PodLister interface { // podLister implements the PodLister interface. type podLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Pod] } // NewPodLister returns a new PodLister. func NewPodLister(indexer cache.Indexer) PodLister { - return &podLister{indexer: indexer} -} - -// List lists all Pods in the indexer. -func (s *podLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Pod)) - }) - return ret, err + return &podLister{listers.New[*v1.Pod](indexer, v1.Resource("pod"))} } // Pods returns an object that can list and get Pods. func (s *podLister) Pods(namespace string) PodNamespaceLister { - return podNamespaceLister{indexer: s.indexer, namespace: namespace} + return podNamespaceLister{listers.NewNamespaced[*v1.Pod](s.ResourceIndexer, namespace)} } // PodNamespaceLister helps list and get Pods. @@ -74,26 +66,5 @@ type PodNamespaceLister interface { // podNamespaceLister implements the PodNamespaceLister // interface. type podNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Pods in the indexer for a given namespace. -func (s podNamespaceLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Pod)) - }) - return ret, err -} - -// Get retrieves the Pod from the indexer for a given namespace and name. -func (s podNamespaceLister) Get(name string) (*v1.Pod, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("pod"), name) - } - return obj.(*v1.Pod), nil + listers.ResourceIndexer[*v1.Pod] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go index 6c310045b7..8ac93148f5 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type PodTemplateLister interface { // podTemplateLister implements the PodTemplateLister interface. type podTemplateLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PodTemplate] } // NewPodTemplateLister returns a new PodTemplateLister. 
func NewPodTemplateLister(indexer cache.Indexer) PodTemplateLister { - return &podTemplateLister{indexer: indexer} -} - -// List lists all PodTemplates in the indexer. -func (s *podTemplateLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodTemplate)) - }) - return ret, err + return &podTemplateLister{listers.New[*v1.PodTemplate](indexer, v1.Resource("podtemplate"))} } // PodTemplates returns an object that can list and get PodTemplates. func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceLister { - return podTemplateNamespaceLister{indexer: s.indexer, namespace: namespace} + return podTemplateNamespaceLister{listers.NewNamespaced[*v1.PodTemplate](s.ResourceIndexer, namespace)} } // PodTemplateNamespaceLister helps list and get PodTemplates. @@ -74,26 +66,5 @@ type PodTemplateNamespaceLister interface { // podTemplateNamespaceLister implements the PodTemplateNamespaceLister // interface. type podTemplateNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodTemplates in the indexer for a given namespace. -func (s podTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodTemplate)) - }) - return ret, err -} - -// Get retrieves the PodTemplate from the indexer for a given namespace and name. -func (s podTemplateNamespaceLister) Get(name string) (*v1.PodTemplate, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("podtemplate"), name) - } - return obj.(*v1.PodTemplate), nil + listers.ResourceIndexer[*v1.PodTemplate] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go index e28e2ef768..8ce23fc09a 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ReplicationControllerLister interface { // replicationControllerLister implements the ReplicationControllerLister interface. type replicationControllerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ReplicationController] } // NewReplicationControllerLister returns a new ReplicationControllerLister. func NewReplicationControllerLister(indexer cache.Indexer) ReplicationControllerLister { - return &replicationControllerLister{indexer: indexer} -} - -// List lists all ReplicationControllers in the indexer. -func (s *replicationControllerLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicationController)) - }) - return ret, err + return &replicationControllerLister{listers.New[*v1.ReplicationController](indexer, v1.Resource("replicationcontroller"))} } // ReplicationControllers returns an object that can list and get ReplicationControllers. 
func (s *replicationControllerLister) ReplicationControllers(namespace string) ReplicationControllerNamespaceLister { - return replicationControllerNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicationControllerNamespaceLister{listers.NewNamespaced[*v1.ReplicationController](s.ResourceIndexer, namespace)} } // ReplicationControllerNamespaceLister helps list and get ReplicationControllers. @@ -74,26 +66,5 @@ type ReplicationControllerNamespaceLister interface { // replicationControllerNamespaceLister implements the ReplicationControllerNamespaceLister // interface. type replicationControllerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicationControllers in the indexer for a given namespace. -func (s replicationControllerNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicationController)) - }) - return ret, err -} - -// Get retrieves the ReplicationController from the indexer for a given namespace and name. -func (s replicationControllerNamespaceLister) Get(name string) (*v1.ReplicationController, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("replicationcontroller"), name) - } - return obj.(*v1.ReplicationController), nil + listers.ResourceIndexer[*v1.ReplicationController] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go index 9c00b49d4f..4b46194a25 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ResourceQuotaLister interface { // resourceQuotaLister implements the ResourceQuotaLister interface. type resourceQuotaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ResourceQuota] } // NewResourceQuotaLister returns a new ResourceQuotaLister. func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister { - return &resourceQuotaLister{indexer: indexer} -} - -// List lists all ResourceQuotas in the indexer. -func (s *resourceQuotaLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ResourceQuota)) - }) - return ret, err + return &resourceQuotaLister{listers.New[*v1.ResourceQuota](indexer, v1.Resource("resourcequota"))} } // ResourceQuotas returns an object that can list and get ResourceQuotas. func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaNamespaceLister { - return resourceQuotaNamespaceLister{indexer: s.indexer, namespace: namespace} + return resourceQuotaNamespaceLister{listers.NewNamespaced[*v1.ResourceQuota](s.ResourceIndexer, namespace)} } // ResourceQuotaNamespaceLister helps list and get ResourceQuotas. @@ -74,26 +66,5 @@ type ResourceQuotaNamespaceLister interface { // resourceQuotaNamespaceLister implements the ResourceQuotaNamespaceLister // interface. 
type resourceQuotaNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceQuotas in the indexer for a given namespace. -func (s resourceQuotaNamespaceLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ResourceQuota)) - }) - return ret, err -} - -// Get retrieves the ResourceQuota from the indexer for a given namespace and name. -func (s resourceQuotaNamespaceLister) Get(name string) (*v1.ResourceQuota, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("resourcequota"), name) - } - return obj.(*v1.ResourceQuota), nil + listers.ResourceIndexer[*v1.ResourceQuota] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/secret.go b/vendor/k8s.io/client-go/listers/core/v1/secret.go index d386d4d5cb..47a0c9a084 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/secret.go +++ b/vendor/k8s.io/client-go/listers/core/v1/secret.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type SecretLister interface { // secretLister implements the SecretLister interface. type secretLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Secret] } // NewSecretLister returns a new SecretLister. func NewSecretLister(indexer cache.Indexer) SecretLister { - return &secretLister{indexer: indexer} -} - -// List lists all Secrets in the indexer. -func (s *secretLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Secret)) - }) - return ret, err + return &secretLister{listers.New[*v1.Secret](indexer, v1.Resource("secret"))} } // Secrets returns an object that can list and get Secrets. func (s *secretLister) Secrets(namespace string) SecretNamespaceLister { - return secretNamespaceLister{indexer: s.indexer, namespace: namespace} + return secretNamespaceLister{listers.NewNamespaced[*v1.Secret](s.ResourceIndexer, namespace)} } // SecretNamespaceLister helps list and get Secrets. @@ -74,26 +66,5 @@ type SecretNamespaceLister interface { // secretNamespaceLister implements the SecretNamespaceLister // interface. type secretNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Secrets in the indexer for a given namespace. -func (s secretNamespaceLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Secret)) - }) - return ret, err -} - -// Get retrieves the Secret from the indexer for a given namespace and name. 
-func (s secretNamespaceLister) Get(name string) (*v1.Secret, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("secret"), name) - } - return obj.(*v1.Secret), nil + listers.ResourceIndexer[*v1.Secret] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/service.go b/vendor/k8s.io/client-go/listers/core/v1/service.go index 51026d7b4b..536fb337f9 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/service.go +++ b/vendor/k8s.io/client-go/listers/core/v1/service.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ServiceLister interface { // serviceLister implements the ServiceLister interface. type serviceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Service] } // NewServiceLister returns a new ServiceLister. func NewServiceLister(indexer cache.Indexer) ServiceLister { - return &serviceLister{indexer: indexer} -} - -// List lists all Services in the indexer. -func (s *serviceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Service)) - }) - return ret, err + return &serviceLister{listers.New[*v1.Service](indexer, v1.Resource("service"))} } // Services returns an object that can list and get Services. func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { - return serviceNamespaceLister{indexer: s.indexer, namespace: namespace} + return serviceNamespaceLister{listers.NewNamespaced[*v1.Service](s.ResourceIndexer, namespace)} } // ServiceNamespaceLister helps list and get Services. @@ -74,26 +66,5 @@ type ServiceNamespaceLister interface { // serviceNamespaceLister implements the ServiceNamespaceLister // interface. type serviceNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Services in the indexer for a given namespace. -func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Service)) - }) - return ret, err -} - -// Get retrieves the Service from the indexer for a given namespace and name. -func (s serviceNamespaceLister) Get(name string) (*v1.Service, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("service"), name) - } - return obj.(*v1.Service), nil + listers.ResourceIndexer[*v1.Service] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go index aa9554d8bb..8a4af4f4cb 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ServiceAccountLister interface { // serviceAccountLister implements the ServiceAccountLister interface. 
type serviceAccountLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ServiceAccount] } // NewServiceAccountLister returns a new ServiceAccountLister. func NewServiceAccountLister(indexer cache.Indexer) ServiceAccountLister { - return &serviceAccountLister{indexer: indexer} -} - -// List lists all ServiceAccounts in the indexer. -func (s *serviceAccountLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ServiceAccount)) - }) - return ret, err + return &serviceAccountLister{listers.New[*v1.ServiceAccount](indexer, v1.Resource("serviceaccount"))} } // ServiceAccounts returns an object that can list and get ServiceAccounts. func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountNamespaceLister { - return serviceAccountNamespaceLister{indexer: s.indexer, namespace: namespace} + return serviceAccountNamespaceLister{listers.NewNamespaced[*v1.ServiceAccount](s.ResourceIndexer, namespace)} } // ServiceAccountNamespaceLister helps list and get ServiceAccounts. @@ -74,26 +66,5 @@ type ServiceAccountNamespaceLister interface { // serviceAccountNamespaceLister implements the ServiceAccountNamespaceLister // interface. type serviceAccountNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ServiceAccounts in the indexer for a given namespace. -func (s serviceAccountNamespaceLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ServiceAccount)) - }) - return ret, err -} - -// Get retrieves the ServiceAccount from the indexer for a given namespace and name. -func (s serviceAccountNamespaceLister) Get(name string) (*v1.ServiceAccount, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("serviceaccount"), name) - } - return obj.(*v1.ServiceAccount), nil + listers.ResourceIndexer[*v1.ServiceAccount] } diff --git a/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go index 4dd46ff1bf..dcb18f19a8 100644 --- a/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/discovery/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EndpointSliceLister interface { // endpointSliceLister implements the EndpointSliceLister interface. type endpointSliceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.EndpointSlice] } // NewEndpointSliceLister returns a new EndpointSliceLister. func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { - return &endpointSliceLister{indexer: indexer} -} - -// List lists all EndpointSlices in the indexer. 
-func (s *endpointSliceLister) List(selector labels.Selector) (ret []*v1.EndpointSlice, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EndpointSlice)) - }) - return ret, err + return &endpointSliceLister{listers.New[*v1.EndpointSlice](indexer, v1.Resource("endpointslice"))} } // EndpointSlices returns an object that can list and get EndpointSlices. func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { - return endpointSliceNamespaceLister{indexer: s.indexer, namespace: namespace} + return endpointSliceNamespaceLister{listers.NewNamespaced[*v1.EndpointSlice](s.ResourceIndexer, namespace)} } // EndpointSliceNamespaceLister helps list and get EndpointSlices. @@ -74,26 +66,5 @@ type EndpointSliceNamespaceLister interface { // endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister // interface. type endpointSliceNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all EndpointSlices in the indexer for a given namespace. -func (s endpointSliceNamespaceLister) List(selector labels.Selector) (ret []*v1.EndpointSlice, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EndpointSlice)) - }) - return ret, err -} - -// Get retrieves the EndpointSlice from the indexer for a given namespace and name. -func (s endpointSliceNamespaceLister) Get(name string) (*v1.EndpointSlice, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("endpointslice"), name) - } - return obj.(*v1.EndpointSlice), nil + listers.ResourceIndexer[*v1.EndpointSlice] } diff --git a/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go index e92872d5f4..d3762f5c28 100644 --- a/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/discovery/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EndpointSliceLister interface { // endpointSliceLister implements the EndpointSliceLister interface. type endpointSliceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.EndpointSlice] } // NewEndpointSliceLister returns a new EndpointSliceLister. func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { - return &endpointSliceLister{indexer: indexer} -} - -// List lists all EndpointSlices in the indexer. -func (s *endpointSliceLister) List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.EndpointSlice)) - }) - return ret, err + return &endpointSliceLister{listers.New[*v1beta1.EndpointSlice](indexer, v1beta1.Resource("endpointslice"))} } // EndpointSlices returns an object that can list and get EndpointSlices. 
func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { - return endpointSliceNamespaceLister{indexer: s.indexer, namespace: namespace} + return endpointSliceNamespaceLister{listers.NewNamespaced[*v1beta1.EndpointSlice](s.ResourceIndexer, namespace)} } // EndpointSliceNamespaceLister helps list and get EndpointSlices. @@ -74,26 +66,5 @@ type EndpointSliceNamespaceLister interface { // endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister // interface. type endpointSliceNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all EndpointSlices in the indexer for a given namespace. -func (s endpointSliceNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.EndpointSlice)) - }) - return ret, err -} - -// Get retrieves the EndpointSlice from the indexer for a given namespace and name. -func (s endpointSliceNamespaceLister) Get(name string) (*v1beta1.EndpointSlice, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("endpointslice"), name) - } - return obj.(*v1beta1.EndpointSlice), nil + listers.ResourceIndexer[*v1beta1.EndpointSlice] } diff --git a/vendor/k8s.io/client-go/listers/doc.go b/vendor/k8s.io/client-go/listers/doc.go new file mode 100644 index 0000000000..96c330c931 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package listers provides generated listers for Kubernetes APIs. +package listers // import "k8s.io/client-go/listers" diff --git a/vendor/k8s.io/client-go/listers/events/v1/event.go b/vendor/k8s.io/client-go/listers/events/v1/event.go index 4abe841e26..66e3c64669 100644 --- a/vendor/k8s.io/client-go/listers/events/v1/event.go +++ b/vendor/k8s.io/client-go/listers/events/v1/event.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/events/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. type eventLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{indexer: indexer} -} - -// List lists all Events in the indexer. -func (s *eventLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err + return &eventLister{listers.New[*v1.Event](indexer, v1.Resource("event"))} } // Events returns an object that can list and get Events. 
func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{indexer: s.indexer, namespace: namespace} + return eventNamespaceLister{listers.NewNamespaced[*v1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. @@ -74,26 +66,5 @@ type EventNamespaceLister interface { // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Events in the indexer for a given namespace. -func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err -} - -// Get retrieves the Event from the indexer for a given namespace and name. -func (s eventNamespaceLister) Get(name string) (*v1.Event, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("event"), name) - } - return obj.(*v1.Event), nil + listers.ResourceIndexer[*v1.Event] } diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/event.go b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go index 41a521be6f..3d51bb2656 100644 --- a/vendor/k8s.io/client-go/listers/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/events/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. type eventLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{indexer: indexer} -} - -// List lists all Events in the indexer. -func (s *eventLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Event)) - }) - return ret, err + return &eventLister{listers.New[*v1beta1.Event](indexer, v1beta1.Resource("event"))} } // Events returns an object that can list and get Events. func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{indexer: s.indexer, namespace: namespace} + return eventNamespaceLister{listers.NewNamespaced[*v1beta1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. @@ -74,26 +66,5 @@ type EventNamespaceLister interface { // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Events in the indexer for a given namespace. -func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Event)) - }) - return ret, err -} - -// Get retrieves the Event from the indexer for a given namespace and name. 
-func (s eventNamespaceLister) Get(name string) (*v1beta1.Event, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("event"), name) - } - return obj.(*v1beta1.Event), nil + listers.ResourceIndexer[*v1beta1.Event] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go index 900475410b..4510b42360 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{indexer: indexer} -} - -// List lists all DaemonSets in the indexer. -func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.DaemonSet)) - }) - return ret, err + return &daemonSetLister{listers.New[*v1beta1.DaemonSet](indexer, v1beta1.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return daemonSetNamespaceLister{listers.NewNamespaced[*v1beta1.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. @@ -74,26 +66,5 @@ type DaemonSetNamespaceLister interface { // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. type daemonSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all DaemonSets in the indexer for a given namespace. -func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.DaemonSet)) - }) - return ret, err -} - -// Get retrieves the DaemonSet from the indexer for a given namespace and name. 
-func (s daemonSetNamespaceLister) Get(name string) (*v1beta1.DaemonSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("daemonset"), name) - } - return obj.(*v1beta1.DaemonSet), nil + listers.ResourceIndexer[*v1beta1.DaemonSet] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go index 42b5a07231..149047c973 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. -func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*v1beta1.Deployment](indexer, v1beta1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*v1beta1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -74,26 +66,5 @@ type DeploymentNamespaceLister interface { // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. 
-func (s deploymentNamespaceLister) Get(name string) (*v1beta1.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("deployment"), name) - } - return obj.(*v1beta1.Deployment), nil + listers.ResourceIndexer[*v1beta1.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go index 1cb7677bd8..b714eebb35 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. type ingressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{indexer: indexer} -} - -// List lists all Ingresses in the indexer. -func (s *ingressLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err + return &ingressLister{listers.New[*v1beta1.Ingress](indexer, v1beta1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} + return ingressNamespaceLister{listers.NewNamespaced[*v1beta1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. @@ -74,26 +66,5 @@ type IngressNamespaceLister interface { // ingressNamespaceLister implements the IngressNamespaceLister // interface. type ingressNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Ingresses in the indexer for a given namespace. -func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err -} - -// Get retrieves the Ingress from the indexer for a given namespace and name. 
-func (s ingressNamespaceLister) Get(name string) (*v1beta1.Ingress, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("ingress"), name) - } - return obj.(*v1beta1.Ingress), nil + listers.ResourceIndexer[*v1beta1.Ingress] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go index 84419a8e96..b31099c266 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type NetworkPolicyLister interface { // networkPolicyLister implements the NetworkPolicyLister interface. type networkPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.NetworkPolicy] } // NewNetworkPolicyLister returns a new NetworkPolicyLister. func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { - return &networkPolicyLister{indexer: indexer} -} - -// List lists all NetworkPolicies in the indexer. -func (s *networkPolicyLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.NetworkPolicy)) - }) - return ret, err + return &networkPolicyLister{listers.New[*v1beta1.NetworkPolicy](indexer, v1beta1.Resource("networkpolicy"))} } // NetworkPolicies returns an object that can list and get NetworkPolicies. func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { - return networkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} + return networkPolicyNamespaceLister{listers.NewNamespaced[*v1beta1.NetworkPolicy](s.ResourceIndexer, namespace)} } // NetworkPolicyNamespaceLister helps list and get NetworkPolicies. @@ -74,26 +66,5 @@ type NetworkPolicyNamespaceLister interface { // networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister // interface. type networkPolicyNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all NetworkPolicies in the indexer for a given namespace. -func (s networkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.NetworkPolicy)) - }) - return ret, err -} - -// Get retrieves the NetworkPolicy from the indexer for a given namespace and name. 
-func (s networkPolicyNamespaceLister) Get(name string) (*v1beta1.NetworkPolicy, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("networkpolicy"), name) - } - return obj.(*v1beta1.NetworkPolicy), nil + listers.ResourceIndexer[*v1beta1.NetworkPolicy] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go index a5ec3229bc..604bee80ba 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{indexer: indexer} -} - -// List lists all ReplicaSets in the indexer. -func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ReplicaSet)) - }) - return ret, err + return &replicaSetLister{listers.New[*v1beta1.ReplicaSet](indexer, v1beta1.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicaSetNamespaceLister{listers.NewNamespaced[*v1beta1.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -74,26 +66,5 @@ type ReplicaSetNamespaceLister interface { // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. type replicaSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicaSets in the indexer for a given namespace. -func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ReplicaSet)) - }) - return ret, err -} - -// Get retrieves the ReplicaSet from the indexer for a given namespace and name. 
-func (s replicaSetNamespaceLister) Get(name string) (*v1beta1.ReplicaSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("replicaset"), name) - } - return obj.(*v1beta1.ReplicaSet), nil + listers.ResourceIndexer[*v1beta1.ReplicaSet] } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go index 43ccd4e5ff..ba7e514874 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/flowcontrol/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type FlowSchemaLister interface { // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. -func (s *flowSchemaLister) Get(name string) (*v1.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("flowschema"), name) - } - return obj.(*v1.FlowSchema), nil + return &flowSchemaLister{listers.New[*v1.FlowSchema](indexer, v1.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go index 61189b9cf9..61f5b9fe6d 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/flowcontrol/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityLevelConfigurationLister interface { // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. 
-func (s *priorityLevelConfigurationLister) Get(name string) (*v1.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*v1.PriorityLevelConfiguration](indexer, v1.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go index 7927a8411e..59bca6ce4e 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type FlowSchemaLister interface { // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta1.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. -func (s *flowSchemaLister) Get(name string) (*v1beta1.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("flowschema"), name) - } - return obj.(*v1beta1.FlowSchema), nil + return &flowSchemaLister{listers.New[*v1beta1.FlowSchema](indexer, v1beta1.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go index c94aaa4c1d..902f7cc4bb 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityLevelConfigurationLister interface { // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. 
-func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. -func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta1.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1beta1.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*v1beta1.PriorityLevelConfiguration](indexer, v1beta1.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go index 2710f26306..721c5f6bdd 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type FlowSchemaLister interface { // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta2.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. -func (s *flowSchemaLister) Get(name string) (*v1beta2.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("flowschema"), name) - } - return obj.(*v1beta2.FlowSchema), nil + return &flowSchemaLister{listers.New[*v1beta2.FlowSchema](indexer, v1beta2.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go index 00ede00709..3e8a2134fc 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityLevelConfigurationLister interface { // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. 
type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta2.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. -func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta2.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1beta2.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*v1beta2.PriorityLevelConfiguration](indexer, v1beta2.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go index ef01b5a76e..c5555fd64d 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go @@ -20,8 +20,8 @@ package v1beta3 import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type FlowSchemaLister interface { // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta3.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta3.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta3.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. 
-func (s *flowSchemaLister) Get(name string) (*v1beta3.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta3.Resource("flowschema"), name) - } - return obj.(*v1beta3.FlowSchema), nil + return &flowSchemaLister{listers.New[*v1beta3.FlowSchema](indexer, v1beta3.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go index d05613949f..9f7d89c548 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -20,8 +20,8 @@ package v1beta3 import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityLevelConfigurationLister interface { // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta3.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta3.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta3.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. -func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta3.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta3.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1beta3.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*v1beta3.PriorityLevelConfiguration](indexer, v1beta3.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/generic_helpers.go b/vendor/k8s.io/client-go/listers/generic_helpers.go new file mode 100644 index 0000000000..c69bb22b11 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/generic_helpers.go @@ -0,0 +1,72 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package listers + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +// ResourceIndexer wraps an indexer, resource, and optional namespace for a given type. +// This is intended for use by listers (generated by lister-gen) only. +type ResourceIndexer[T runtime.Object] struct { + indexer cache.Indexer + resource schema.GroupResource + namespace string // empty for non-namespaced types +} + +// New returns a new instance of a lister (resource indexer) wrapping the given indexer and resource for the specified type. +// This is intended for use by listers (generated by lister-gen) only. +func New[T runtime.Object](indexer cache.Indexer, resource schema.GroupResource) ResourceIndexer[T] { + return ResourceIndexer[T]{indexer: indexer, resource: resource} +} + +// NewNamespaced returns a new instance of a namespaced lister (resource indexer) wrapping the given parent and namespace for the specified type. +// This is intended for use by listers (generated by lister-gen) only. +func NewNamespaced[T runtime.Object](parent ResourceIndexer[T], namespace string) ResourceIndexer[T] { + return ResourceIndexer[T]{indexer: parent.indexer, resource: parent.resource, namespace: namespace} +} + +// List lists all resources in the indexer matching the given selector. +func (l ResourceIndexer[T]) List(selector labels.Selector) (ret []T, err error) { + // ListAllByNamespace reverts to ListAll on empty namespaces + err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, func(m interface{}) { + ret = append(ret, m.(T)) + }) + return ret, err +} + +// Get retrieves the resource from the index for a given name. +func (l ResourceIndexer[T]) Get(name string) (T, error) { + var key string + if l.namespace == "" { + key = name + } else { + key = l.namespace + "/" + name + } + obj, exists, err := l.indexer.GetByKey(key) + if err != nil { + return *new(T), err + } + if !exists { + return *new(T), errors.NewNotFound(l.resource, name) + } + return obj.(T), nil +} diff --git a/vendor/k8s.io/client-go/listers/networking/v1/ingress.go b/vendor/k8s.io/client-go/listers/networking/v1/ingress.go index 0f49d4f572..3007cd3492 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/listers/networking/v1/ingress.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. type ingressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{indexer: indexer} -} - -// List lists all Ingresses in the indexer. -func (s *ingressLister) List(selector labels.Selector) (ret []*v1.Ingress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Ingress)) - }) - return ret, err + return &ingressLister{listers.New[*v1.Ingress](indexer, v1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. 
func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} + return ingressNamespaceLister{listers.NewNamespaced[*v1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. @@ -74,26 +66,5 @@ type IngressNamespaceLister interface { // ingressNamespaceLister implements the IngressNamespaceLister // interface. type ingressNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Ingresses in the indexer for a given namespace. -func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1.Ingress, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Ingress)) - }) - return ret, err -} - -// Get retrieves the Ingress from the indexer for a given namespace and name. -func (s ingressNamespaceLister) Get(name string) (*v1.Ingress, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("ingress"), name) - } - return obj.(*v1.Ingress), nil + listers.ResourceIndexer[*v1.Ingress] } diff --git a/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go index 1480cb13fd..a8efe5c5e3 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type IngressClassLister interface { // ingressClassLister implements the IngressClassLister interface. type ingressClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.IngressClass] } // NewIngressClassLister returns a new IngressClassLister. func NewIngressClassLister(indexer cache.Indexer) IngressClassLister { - return &ingressClassLister{indexer: indexer} -} - -// List lists all IngressClasses in the indexer. -func (s *ingressClassLister) List(selector labels.Selector) (ret []*v1.IngressClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.IngressClass)) - }) - return ret, err -} - -// Get retrieves the IngressClass from the index for a given name. 
-func (s *ingressClassLister) Get(name string) (*v1.IngressClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("ingressclass"), name) - } - return obj.(*v1.IngressClass), nil + return &ingressClassLister{listers.New[*v1.IngressClass](indexer, v1.Resource("ingressclass"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go index 34cabf0577..9a3e3172ef 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type NetworkPolicyLister interface { // networkPolicyLister implements the NetworkPolicyLister interface. type networkPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.NetworkPolicy] } // NewNetworkPolicyLister returns a new NetworkPolicyLister. func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { - return &networkPolicyLister{indexer: indexer} -} - -// List lists all NetworkPolicies in the indexer. -func (s *networkPolicyLister) List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.NetworkPolicy)) - }) - return ret, err + return &networkPolicyLister{listers.New[*v1.NetworkPolicy](indexer, v1.Resource("networkpolicy"))} } // NetworkPolicies returns an object that can list and get NetworkPolicies. func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { - return networkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} + return networkPolicyNamespaceLister{listers.NewNamespaced[*v1.NetworkPolicy](s.ResourceIndexer, namespace)} } // NetworkPolicyNamespaceLister helps list and get NetworkPolicies. @@ -74,26 +66,5 @@ type NetworkPolicyNamespaceLister interface { // networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister // interface. type networkPolicyNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all NetworkPolicies in the indexer for a given namespace. -func (s networkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.NetworkPolicy)) - }) - return ret, err -} - -// Get retrieves the NetworkPolicy from the indexer for a given namespace and name. 
-func (s networkPolicyNamespaceLister) Get(name string) (*v1.NetworkPolicy, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("networkpolicy"), name) - } - return obj.(*v1.NetworkPolicy), nil + listers.ResourceIndexer[*v1.NetworkPolicy] } diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go index b3dfe27971..749affd7b9 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/networking/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type IPAddressLister interface { // iPAddressLister implements the IPAddressLister interface. type iPAddressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.IPAddress] } // NewIPAddressLister returns a new IPAddressLister. func NewIPAddressLister(indexer cache.Indexer) IPAddressLister { - return &iPAddressLister{indexer: indexer} -} - -// List lists all IPAddresses in the indexer. -func (s *iPAddressLister) List(selector labels.Selector) (ret []*v1alpha1.IPAddress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.IPAddress)) - }) - return ret, err -} - -// Get retrieves the IPAddress from the index for a given name. -func (s *iPAddressLister) Get(name string) (*v1alpha1.IPAddress, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("ipaddress"), name) - } - return obj.(*v1alpha1.IPAddress), nil + return &iPAddressLister{listers.New[*v1alpha1.IPAddress](indexer, v1alpha1.Resource("ipaddress"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go index 8bc2b10e68..8be2d11af4 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/networking/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ServiceCIDRLister interface { // serviceCIDRLister implements the ServiceCIDRLister interface. type serviceCIDRLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ServiceCIDR] } // NewServiceCIDRLister returns a new ServiceCIDRLister. func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister { - return &serviceCIDRLister{indexer: indexer} -} - -// List lists all ServiceCIDRs in the indexer. -func (s *serviceCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ServiceCIDR)) - }) - return ret, err -} - -// Get retrieves the ServiceCIDR from the index for a given name. 
-func (s *serviceCIDRLister) Get(name string) (*v1alpha1.ServiceCIDR, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("servicecidr"), name) - } - return obj.(*v1alpha1.ServiceCIDR), nil + return &serviceCIDRLister{listers.New[*v1alpha1.ServiceCIDR](indexer, v1alpha1.Resource("servicecidr"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go index d8c99c186e..320af736e6 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go @@ -18,6 +18,10 @@ limitations under the License. package v1beta1 +// IPAddressListerExpansion allows custom methods to be added to +// IPAddressLister. +type IPAddressListerExpansion interface{} + // IngressListerExpansion allows custom methods to be added to // IngressLister. type IngressListerExpansion interface{} @@ -29,3 +33,7 @@ type IngressNamespaceListerExpansion interface{} // IngressClassListerExpansion allows custom methods to be added to // IngressClassLister. type IngressClassListerExpansion interface{} + +// ServiceCIDRListerExpansion allows custom methods to be added to +// ServiceCIDRLister. +type ServiceCIDRListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go index b8f4d35580..b8fe99e240 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. type ingressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{indexer: indexer} -} - -// List lists all Ingresses in the indexer. -func (s *ingressLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err + return &ingressLister{listers.New[*v1beta1.Ingress](indexer, v1beta1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} + return ingressNamespaceLister{listers.NewNamespaced[*v1beta1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. @@ -74,26 +66,5 @@ type IngressNamespaceLister interface { // ingressNamespaceLister implements the IngressNamespaceLister // interface. type ingressNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Ingresses in the indexer for a given namespace. 
-func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err -} - -// Get retrieves the Ingress from the indexer for a given namespace and name. -func (s ingressNamespaceLister) Get(name string) (*v1beta1.Ingress, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("ingress"), name) - } - return obj.(*v1beta1.Ingress), nil + listers.ResourceIndexer[*v1beta1.Ingress] } diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go index ebcd6ba85b..a5e33525f6 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type IngressClassLister interface { // ingressClassLister implements the IngressClassLister interface. type ingressClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.IngressClass] } // NewIngressClassLister returns a new IngressClassLister. func NewIngressClassLister(indexer cache.Indexer) IngressClassLister { - return &ingressClassLister{indexer: indexer} -} - -// List lists all IngressClasses in the indexer. -func (s *ingressClassLister) List(selector labels.Selector) (ret []*v1beta1.IngressClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.IngressClass)) - }) - return ret, err -} - -// Get retrieves the IngressClass from the index for a given name. -func (s *ingressClassLister) Get(name string) (*v1beta1.IngressClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("ingressclass"), name) - } - return obj.(*v1beta1.IngressClass), nil + return &ingressClassLister{listers.New[*v1beta1.IngressClass](indexer, v1beta1.Resource("ingressclass"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go new file mode 100644 index 0000000000..3614066701 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/networking/v1beta1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// IPAddressLister helps list IPAddresses. +// All objects returned here must be treated as read-only. +type IPAddressLister interface { + // List lists all IPAddresses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.IPAddress, err error) + // Get retrieves the IPAddress from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.IPAddress, error) + IPAddressListerExpansion +} + +// iPAddressLister implements the IPAddressLister interface. +type iPAddressLister struct { + listers.ResourceIndexer[*v1beta1.IPAddress] +} + +// NewIPAddressLister returns a new IPAddressLister. +func NewIPAddressLister(indexer cache.Indexer) IPAddressLister { + return &iPAddressLister{listers.New[*v1beta1.IPAddress](indexer, v1beta1.Resource("ipaddress"))} +} diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go new file mode 100644 index 0000000000..2902fa7f15 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/networking/v1beta1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ServiceCIDRLister helps list ServiceCIDRs. +// All objects returned here must be treated as read-only. +type ServiceCIDRLister interface { + // List lists all ServiceCIDRs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.ServiceCIDR, err error) + // Get retrieves the ServiceCIDR from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.ServiceCIDR, error) + ServiceCIDRListerExpansion +} + +// serviceCIDRLister implements the ServiceCIDRLister interface. +type serviceCIDRLister struct { + listers.ResourceIndexer[*v1beta1.ServiceCIDR] +} + +// NewServiceCIDRLister returns a new ServiceCIDRLister. 
+func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister { + return &serviceCIDRLister{listers.New[*v1beta1.ServiceCIDR](indexer, v1beta1.Resource("servicecidr"))} +} diff --git a/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go index 6e00cf1a59..17b88687e2 100644 --- a/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/node/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type RuntimeClassLister interface { // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{indexer: indexer} -} - -// List lists all RuntimeClasses in the indexer. -func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1.RuntimeClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.RuntimeClass)) - }) - return ret, err -} - -// Get retrieves the RuntimeClass from the index for a given name. -func (s *runtimeClassLister) Get(name string) (*v1.RuntimeClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("runtimeclass"), name) - } - return obj.(*v1.RuntimeClass), nil + return &runtimeClassLister{listers.New[*v1.RuntimeClass](indexer, v1.Resource("runtimeclass"))} } diff --git a/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go index 31f3357990..1f6e06f489 100644 --- a/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/node/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type RuntimeClassLister interface { // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{indexer: indexer} -} - -// List lists all RuntimeClasses in the indexer. -func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RuntimeClass)) - }) - return ret, err -} - -// Get retrieves the RuntimeClass from the index for a given name. 
-func (s *runtimeClassLister) Get(name string) (*v1alpha1.RuntimeClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("runtimeclass"), name) - } - return obj.(*v1alpha1.RuntimeClass), nil + return &runtimeClassLister{listers.New[*v1alpha1.RuntimeClass](indexer, v1alpha1.Resource("runtimeclass"))} } diff --git a/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go index 7dbd6ab268..cd0cdf3c52 100644 --- a/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/node/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type RuntimeClassLister interface { // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{indexer: indexer} -} - -// List lists all RuntimeClasses in the indexer. -func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.RuntimeClass)) - }) - return ret, err -} - -// Get retrieves the RuntimeClass from the index for a given name. -func (s *runtimeClassLister) Get(name string) (*v1beta1.RuntimeClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("runtimeclass"), name) - } - return obj.(*v1beta1.RuntimeClass), nil + return &runtimeClassLister{listers.New[*v1beta1.RuntimeClass](indexer, v1beta1.Resource("runtimeclass"))} } diff --git a/vendor/k8s.io/client-go/listers/policy/v1/eviction.go b/vendor/k8s.io/client-go/listers/policy/v1/eviction.go index dc5ffa0740..83695668fa 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1/eviction.go +++ b/vendor/k8s.io/client-go/listers/policy/v1/eviction.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/policy/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EvictionLister interface { // evictionLister implements the EvictionLister interface. type evictionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Eviction] } // NewEvictionLister returns a new EvictionLister. func NewEvictionLister(indexer cache.Indexer) EvictionLister { - return &evictionLister{indexer: indexer} -} - -// List lists all Evictions in the indexer. -func (s *evictionLister) List(selector labels.Selector) (ret []*v1.Eviction, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Eviction)) - }) - return ret, err + return &evictionLister{listers.New[*v1.Eviction](indexer, v1.Resource("eviction"))} } // Evictions returns an object that can list and get Evictions. 
func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { - return evictionNamespaceLister{indexer: s.indexer, namespace: namespace} + return evictionNamespaceLister{listers.NewNamespaced[*v1.Eviction](s.ResourceIndexer, namespace)} } // EvictionNamespaceLister helps list and get Evictions. @@ -74,26 +66,5 @@ type EvictionNamespaceLister interface { // evictionNamespaceLister implements the EvictionNamespaceLister // interface. type evictionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Evictions in the indexer for a given namespace. -func (s evictionNamespaceLister) List(selector labels.Selector) (ret []*v1.Eviction, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Eviction)) - }) - return ret, err -} - -// Get retrieves the Eviction from the indexer for a given namespace and name. -func (s evictionNamespaceLister) Get(name string) (*v1.Eviction, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("eviction"), name) - } - return obj.(*v1.Eviction), nil + listers.ResourceIndexer[*v1.Eviction] } diff --git a/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go index 8470d38bb2..38ed8144eb 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/policy/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type PodDisruptionBudgetLister interface { // podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface. type podDisruptionBudgetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PodDisruptionBudget] } // NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister. func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister { - return &podDisruptionBudgetLister{indexer: indexer} -} - -// List lists all PodDisruptionBudgets in the indexer. -func (s *podDisruptionBudgetLister) List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodDisruptionBudget)) - }) - return ret, err + return &podDisruptionBudgetLister{listers.New[*v1.PodDisruptionBudget](indexer, v1.Resource("poddisruptionbudget"))} } // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister { - return podDisruptionBudgetNamespaceLister{indexer: s.indexer, namespace: namespace} + return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*v1.PodDisruptionBudget](s.ResourceIndexer, namespace)} } // PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets. @@ -74,26 +66,5 @@ type PodDisruptionBudgetNamespaceLister interface { // podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister // interface. type podDisruptionBudgetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodDisruptionBudgets in the indexer for a given namespace. 
-func (s podDisruptionBudgetNamespaceLister) List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodDisruptionBudget)) - }) - return ret, err -} - -// Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. -func (s podDisruptionBudgetNamespaceLister) Get(name string) (*v1.PodDisruptionBudget, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("poddisruptionbudget"), name) - } - return obj.(*v1.PodDisruptionBudget), nil + listers.ResourceIndexer[*v1.PodDisruptionBudget] } diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go index e1d40d0b32..0aff833524 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EvictionLister interface { // evictionLister implements the EvictionLister interface. type evictionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Eviction] } // NewEvictionLister returns a new EvictionLister. func NewEvictionLister(indexer cache.Indexer) EvictionLister { - return &evictionLister{indexer: indexer} -} - -// List lists all Evictions in the indexer. -func (s *evictionLister) List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Eviction)) - }) - return ret, err + return &evictionLister{listers.New[*v1beta1.Eviction](indexer, v1beta1.Resource("eviction"))} } // Evictions returns an object that can list and get Evictions. func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { - return evictionNamespaceLister{indexer: s.indexer, namespace: namespace} + return evictionNamespaceLister{listers.NewNamespaced[*v1beta1.Eviction](s.ResourceIndexer, namespace)} } // EvictionNamespaceLister helps list and get Evictions. @@ -74,26 +66,5 @@ type EvictionNamespaceLister interface { // evictionNamespaceLister implements the EvictionNamespaceLister // interface. type evictionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Evictions in the indexer for a given namespace. -func (s evictionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Eviction)) - }) - return ret, err -} - -// Get retrieves the Eviction from the indexer for a given namespace and name. 
-func (s evictionNamespaceLister) Get(name string) (*v1beta1.Eviction, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("eviction"), name) - } - return obj.(*v1beta1.Eviction), nil + listers.ResourceIndexer[*v1beta1.Eviction] } diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go index aa08f813ee..55ae892e2b 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type PodDisruptionBudgetLister interface { // podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface. type podDisruptionBudgetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.PodDisruptionBudget] } // NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister. func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister { - return &podDisruptionBudgetLister{indexer: indexer} -} - -// List lists all PodDisruptionBudgets in the indexer. -func (s *podDisruptionBudgetLister) List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PodDisruptionBudget)) - }) - return ret, err + return &podDisruptionBudgetLister{listers.New[*v1beta1.PodDisruptionBudget](indexer, v1beta1.Resource("poddisruptionbudget"))} } // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister { - return podDisruptionBudgetNamespaceLister{indexer: s.indexer, namespace: namespace} + return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*v1beta1.PodDisruptionBudget](s.ResourceIndexer, namespace)} } // PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets. @@ -74,26 +66,5 @@ type PodDisruptionBudgetNamespaceLister interface { // podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister // interface. type podDisruptionBudgetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodDisruptionBudgets in the indexer for a given namespace. -func (s podDisruptionBudgetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PodDisruptionBudget)) - }) - return ret, err -} - -// Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. 
-func (s podDisruptionBudgetNamespaceLister) Get(name string) (*v1beta1.PodDisruptionBudget, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("poddisruptionbudget"), name) - } - return obj.(*v1beta1.PodDisruptionBudget), nil + listers.ResourceIndexer[*v1beta1.PodDisruptionBudget] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go index 84dc003ca2..11a4cb4db4 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleLister interface { // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{indexer: indexer} -} - -// List lists all ClusterRoles in the indexer. -func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1.ClusterRole, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ClusterRole)) - }) - return ret, err -} - -// Get retrieves the ClusterRole from the index for a given name. -func (s *clusterRoleLister) Get(name string) (*v1.ClusterRole, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("clusterrole"), name) - } - return obj.(*v1.ClusterRole), nil + return &clusterRoleLister{listers.New[*v1.ClusterRole](indexer, v1.Resource("clusterrole"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go index ff061d4b2b..4c3583bb94 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleBindingLister interface { // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{indexer: indexer} -} - -// List lists all ClusterRoleBindings in the indexer. -func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1.ClusterRoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ClusterRoleBinding)) - }) - return ret, err -} - -// Get retrieves the ClusterRoleBinding from the index for a given name. 
-func (s *clusterRoleBindingLister) Get(name string) (*v1.ClusterRoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("clusterrolebinding"), name) - } - return obj.(*v1.ClusterRoleBinding), nil + return &clusterRoleBindingLister{listers.New[*v1.ClusterRoleBinding](indexer, v1.Resource("clusterrolebinding"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1/role.go index 503f013b52..3e94253215 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/role.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. type roleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{indexer: indexer} -} - -// List lists all Roles in the indexer. -func (s *roleLister) List(selector labels.Selector) (ret []*v1.Role, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Role)) - }) - return ret, err + return &roleLister{listers.New[*v1.Role](indexer, v1.Resource("role"))} } // Roles returns an object that can list and get Roles. func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleNamespaceLister{listers.NewNamespaced[*v1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -74,26 +66,5 @@ type RoleNamespaceLister interface { // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Roles in the indexer for a given namespace. -func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1.Role, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Role)) - }) - return ret, err -} - -// Get retrieves the Role from the indexer for a given namespace and name. -func (s roleNamespaceLister) Get(name string) (*v1.Role, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("role"), name) - } - return obj.(*v1.Role), nil + listers.ResourceIndexer[*v1.Role] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go index ea50c64136..1b3162a113 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. 
func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{indexer: indexer} -} - -// List lists all RoleBindings in the indexer. -func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.RoleBinding)) - }) - return ret, err + return &roleBindingLister{listers.New[*v1.RoleBinding](indexer, v1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleBindingNamespaceLister{listers.NewNamespaced[*v1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. @@ -74,26 +66,5 @@ type RoleBindingNamespaceLister interface { // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all RoleBindings in the indexer for a given namespace. -func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.RoleBinding)) - }) - return ret, err -} - -// Get retrieves the RoleBinding from the indexer for a given namespace and name. -func (s roleBindingNamespaceLister) Get(name string) (*v1.RoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("rolebinding"), name) - } - return obj.(*v1.RoleBinding), nil + listers.ResourceIndexer[*v1.RoleBinding] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go index 181ea95a7d..5e5bbaa5a2 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleLister interface { // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{indexer: indexer} -} - -// List lists all ClusterRoles in the indexer. -func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterRole, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterRole)) - }) - return ret, err -} - -// Get retrieves the ClusterRole from the index for a given name. 
-func (s *clusterRoleLister) Get(name string) (*v1alpha1.ClusterRole, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clusterrole"), name) - } - return obj.(*v1alpha1.ClusterRole), nil + return &clusterRoleLister{listers.New[*v1alpha1.ClusterRole](indexer, v1alpha1.Resource("clusterrole"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go index 29d283b6cf..d825d0a2f4 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleBindingLister interface { // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{indexer: indexer} -} - -// List lists all ClusterRoleBindings in the indexer. -func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterRoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterRoleBinding)) - }) - return ret, err -} - -// Get retrieves the ClusterRoleBinding from the index for a given name. -func (s *clusterRoleBindingLister) Get(name string) (*v1alpha1.ClusterRoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clusterrolebinding"), name) - } - return obj.(*v1alpha1.ClusterRoleBinding), nil + return &clusterRoleBindingLister{listers.New[*v1alpha1.ClusterRoleBinding](indexer, v1alpha1.Resource("clusterrolebinding"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go index 13a64137ae..f3d2b2838d 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. type roleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{indexer: indexer} -} - -// List lists all Roles in the indexer. -func (s *roleLister) List(selector labels.Selector) (ret []*v1alpha1.Role, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Role)) - }) - return ret, err + return &roleLister{listers.New[*v1alpha1.Role](indexer, v1alpha1.Resource("role"))} } // Roles returns an object that can list and get Roles. 
func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleNamespaceLister{listers.NewNamespaced[*v1alpha1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -74,26 +66,5 @@ type RoleNamespaceLister interface { // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Roles in the indexer for a given namespace. -func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Role, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Role)) - }) - return ret, err -} - -// Get retrieves the Role from the indexer for a given namespace and name. -func (s roleNamespaceLister) Get(name string) (*v1alpha1.Role, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("role"), name) - } - return obj.(*v1alpha1.Role), nil + listers.ResourceIndexer[*v1alpha1.Role] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go index 0ad3d0eba0..6d6f7b7005 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{indexer: indexer} -} - -// List lists all RoleBindings in the indexer. -func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RoleBinding)) - }) - return ret, err + return &roleBindingLister{listers.New[*v1alpha1.RoleBinding](indexer, v1alpha1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleBindingNamespaceLister{listers.NewNamespaced[*v1alpha1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. @@ -74,26 +66,5 @@ type RoleBindingNamespaceLister interface { // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all RoleBindings in the indexer for a given namespace. 
-func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RoleBinding)) - }) - return ret, err -} - -// Get retrieves the RoleBinding from the indexer for a given namespace and name. -func (s roleBindingNamespaceLister) Get(name string) (*v1alpha1.RoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("rolebinding"), name) - } - return obj.(*v1alpha1.RoleBinding), nil + listers.ResourceIndexer[*v1alpha1.RoleBinding] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go index bf6cd99cb1..bade032628 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleLister interface { // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{indexer: indexer} -} - -// List lists all ClusterRoles in the indexer. -func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1beta1.ClusterRole, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ClusterRole)) - }) - return ret, err -} - -// Get retrieves the ClusterRole from the index for a given name. -func (s *clusterRoleLister) Get(name string) (*v1beta1.ClusterRole, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("clusterrole"), name) - } - return obj.(*v1beta1.ClusterRole), nil + return &clusterRoleLister{listers.New[*v1beta1.ClusterRole](indexer, v1beta1.Resource("clusterrole"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go index 00bab2330b..1f4d391bef 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleBindingLister interface { // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{indexer: indexer} -} - -// List lists all ClusterRoleBindings in the indexer. 
-func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1beta1.ClusterRoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ClusterRoleBinding)) - }) - return ret, err -} - -// Get retrieves the ClusterRoleBinding from the index for a given name. -func (s *clusterRoleBindingLister) Get(name string) (*v1beta1.ClusterRoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("clusterrolebinding"), name) - } - return obj.(*v1beta1.ClusterRoleBinding), nil + return &clusterRoleBindingLister{listers.New[*v1beta1.ClusterRoleBinding](indexer, v1beta1.Resource("clusterrolebinding"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go index 9cd9b9042d..71666a9a03 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. type roleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{indexer: indexer} -} - -// List lists all Roles in the indexer. -func (s *roleLister) List(selector labels.Selector) (ret []*v1beta1.Role, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Role)) - }) - return ret, err + return &roleLister{listers.New[*v1beta1.Role](indexer, v1beta1.Resource("role"))} } // Roles returns an object that can list and get Roles. func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleNamespaceLister{listers.NewNamespaced[*v1beta1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -74,26 +66,5 @@ type RoleNamespaceLister interface { // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Roles in the indexer for a given namespace. -func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Role, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Role)) - }) - return ret, err -} - -// Get retrieves the Role from the indexer for a given namespace and name. 
-func (s roleNamespaceLister) Get(name string) (*v1beta1.Role, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("role"), name) - } - return obj.(*v1beta1.Role), nil + listers.ResourceIndexer[*v1beta1.Role] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go index 7c7c91bf3f..00f8542cbd 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{indexer: indexer} -} - -// List lists all RoleBindings in the indexer. -func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.RoleBinding)) - }) - return ret, err + return &roleBindingLister{listers.New[*v1beta1.RoleBinding](indexer, v1beta1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleBindingNamespaceLister{listers.NewNamespaced[*v1beta1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. @@ -74,26 +66,5 @@ type RoleBindingNamespaceLister interface { // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all RoleBindings in the indexer for a given namespace. -func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.RoleBinding)) - }) - return ret, err -} - -// Get retrieves the RoleBinding from the indexer for a given namespace and name. -func (s roleBindingNamespaceLister) Get(name string) (*v1beta1.RoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("rolebinding"), name) - } - return obj.(*v1beta1.RoleBinding), nil + listers.ResourceIndexer[*v1beta1.RoleBinding] } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimparameters.go b/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimparameters.go deleted file mode 100644 index 1a561ef7a5..0000000000 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimparameters.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ResourceClaimParametersLister helps list ResourceClaimParameters. -// All objects returned here must be treated as read-only. -type ResourceClaimParametersLister interface { - // List lists all ResourceClaimParameters in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimParameters, err error) - // ResourceClaimParameters returns an object that can list and get ResourceClaimParameters. - ResourceClaimParameters(namespace string) ResourceClaimParametersNamespaceLister - ResourceClaimParametersListerExpansion -} - -// resourceClaimParametersLister implements the ResourceClaimParametersLister interface. -type resourceClaimParametersLister struct { - indexer cache.Indexer -} - -// NewResourceClaimParametersLister returns a new ResourceClaimParametersLister. -func NewResourceClaimParametersLister(indexer cache.Indexer) ResourceClaimParametersLister { - return &resourceClaimParametersLister{indexer: indexer} -} - -// List lists all ResourceClaimParameters in the indexer. -func (s *resourceClaimParametersLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimParameters, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaimParameters)) - }) - return ret, err -} - -// ResourceClaimParameters returns an object that can list and get ResourceClaimParameters. -func (s *resourceClaimParametersLister) ResourceClaimParameters(namespace string) ResourceClaimParametersNamespaceLister { - return resourceClaimParametersNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ResourceClaimParametersNamespaceLister helps list and get ResourceClaimParameters. -// All objects returned here must be treated as read-only. -type ResourceClaimParametersNamespaceLister interface { - // List lists all ResourceClaimParameters in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimParameters, err error) - // Get retrieves the ResourceClaimParameters from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClaimParameters, error) - ResourceClaimParametersNamespaceListerExpansion -} - -// resourceClaimParametersNamespaceLister implements the ResourceClaimParametersNamespaceLister -// interface. -type resourceClaimParametersNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceClaimParameters in the indexer for a given namespace. 
-func (s resourceClaimParametersNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimParameters, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaimParameters)) - }) - return ret, err -} - -// Get retrieves the ResourceClaimParameters from the indexer for a given namespace and name. -func (s resourceClaimParametersNamespaceLister) Get(name string) (*v1alpha2.ResourceClaimParameters, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaimparameters"), name) - } - return obj.(*v1alpha2.ResourceClaimParameters), nil -} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go deleted file mode 100644 index eeb2fc3379..0000000000 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ResourceClassLister helps list ResourceClasses. -// All objects returned here must be treated as read-only. -type ResourceClassLister interface { - // List lists all ResourceClasses in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClass, err error) - // Get retrieves the ResourceClass from the index for a given name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClass, error) - ResourceClassListerExpansion -} - -// resourceClassLister implements the ResourceClassLister interface. -type resourceClassLister struct { - indexer cache.Indexer -} - -// NewResourceClassLister returns a new ResourceClassLister. -func NewResourceClassLister(indexer cache.Indexer) ResourceClassLister { - return &resourceClassLister{indexer: indexer} -} - -// List lists all ResourceClasses in the indexer. -func (s *resourceClassLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClass)) - }) - return ret, err -} - -// Get retrieves the ResourceClass from the index for a given name. 
-func (s *resourceClassLister) Get(name string) (*v1alpha2.ResourceClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclass"), name) - } - return obj.(*v1alpha2.ResourceClass), nil -} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclassparameters.go b/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclassparameters.go deleted file mode 100644 index 26fb95e6d2..0000000000 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclassparameters.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ResourceClassParametersLister helps list ResourceClassParameters. -// All objects returned here must be treated as read-only. -type ResourceClassParametersLister interface { - // List lists all ResourceClassParameters in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClassParameters, err error) - // ResourceClassParameters returns an object that can list and get ResourceClassParameters. - ResourceClassParameters(namespace string) ResourceClassParametersNamespaceLister - ResourceClassParametersListerExpansion -} - -// resourceClassParametersLister implements the ResourceClassParametersLister interface. -type resourceClassParametersLister struct { - indexer cache.Indexer -} - -// NewResourceClassParametersLister returns a new ResourceClassParametersLister. -func NewResourceClassParametersLister(indexer cache.Indexer) ResourceClassParametersLister { - return &resourceClassParametersLister{indexer: indexer} -} - -// List lists all ResourceClassParameters in the indexer. -func (s *resourceClassParametersLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClassParameters, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClassParameters)) - }) - return ret, err -} - -// ResourceClassParameters returns an object that can list and get ResourceClassParameters. -func (s *resourceClassParametersLister) ResourceClassParameters(namespace string) ResourceClassParametersNamespaceLister { - return resourceClassParametersNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ResourceClassParametersNamespaceLister helps list and get ResourceClassParameters. -// All objects returned here must be treated as read-only. -type ResourceClassParametersNamespaceLister interface { - // List lists all ResourceClassParameters in the indexer for a given namespace. - // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1alpha2.ResourceClassParameters, err error) - // Get retrieves the ResourceClassParameters from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClassParameters, error) - ResourceClassParametersNamespaceListerExpansion -} - -// resourceClassParametersNamespaceLister implements the ResourceClassParametersNamespaceLister -// interface. -type resourceClassParametersNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceClassParameters in the indexer for a given namespace. -func (s resourceClassParametersNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClassParameters, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClassParameters)) - }) - return ret, err -} - -// Get retrieves the ResourceClassParameters from the indexer for a given namespace and name. -func (s resourceClassParametersNamespaceLister) Get(name string) (*v1alpha2.ResourceClassParameters, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclassparameters"), name) - } - return obj.(*v1alpha2.ResourceClassParameters), nil -} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go new file mode 100644 index 0000000000..0950691e2b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "k8s.io/api/resource/v1alpha3" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// DeviceClassLister helps list DeviceClasses. +// All objects returned here must be treated as read-only. +type DeviceClassLister interface { + // List lists all DeviceClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha3.DeviceClass, err error) + // Get retrieves the DeviceClass from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha3.DeviceClass, error) + DeviceClassListerExpansion +} + +// deviceClassLister implements the DeviceClassLister interface. +type deviceClassLister struct { + listers.ResourceIndexer[*v1alpha3.DeviceClass] +} + +// NewDeviceClassLister returns a new DeviceClassLister. 
+func NewDeviceClassLister(indexer cache.Indexer) DeviceClassLister { + return &deviceClassLister{listers.New[*v1alpha3.DeviceClass](indexer, v1alpha3.Resource("deviceclass"))} +} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go similarity index 66% rename from vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go rename to vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go index 68861832d9..b6642f635f 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go @@ -16,7 +16,11 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 + +// DeviceClassListerExpansion allows custom methods to be added to +// DeviceClassLister. +type DeviceClassListerExpansion interface{} // PodSchedulingContextListerExpansion allows custom methods to be added to // PodSchedulingContextLister. @@ -34,14 +38,6 @@ type ResourceClaimListerExpansion interface{} // ResourceClaimNamespaceLister. type ResourceClaimNamespaceListerExpansion interface{} -// ResourceClaimParametersListerExpansion allows custom methods to be added to -// ResourceClaimParametersLister. -type ResourceClaimParametersListerExpansion interface{} - -// ResourceClaimParametersNamespaceListerExpansion allows custom methods to be added to -// ResourceClaimParametersNamespaceLister. -type ResourceClaimParametersNamespaceListerExpansion interface{} - // ResourceClaimTemplateListerExpansion allows custom methods to be added to // ResourceClaimTemplateLister. type ResourceClaimTemplateListerExpansion interface{} @@ -50,18 +46,6 @@ type ResourceClaimTemplateListerExpansion interface{} // ResourceClaimTemplateNamespaceLister. type ResourceClaimTemplateNamespaceListerExpansion interface{} -// ResourceClassListerExpansion allows custom methods to be added to -// ResourceClassLister. -type ResourceClassListerExpansion interface{} - -// ResourceClassParametersListerExpansion allows custom methods to be added to -// ResourceClassParametersLister. -type ResourceClassParametersListerExpansion interface{} - -// ResourceClassParametersNamespaceListerExpansion allows custom methods to be added to -// ResourceClassParametersNamespaceLister. -type ResourceClassParametersNamespaceListerExpansion interface{} - // ResourceSliceListerExpansion allows custom methods to be added to // ResourceSliceLister. type ResourceSliceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go similarity index 59% rename from vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go rename to vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go index c50b3f8890..ed9b049432 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go @@ -16,12 +16,12 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. 
-package v1alpha2 +package v1alpha3 import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" + v1alpha3 "k8s.io/api/resource/v1alpha3" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -30,7 +30,7 @@ import ( type PodSchedulingContextLister interface { // List lists all PodSchedulingContexts in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) + List(selector labels.Selector) (ret []*v1alpha3.PodSchedulingContext, err error) // PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister PodSchedulingContextListerExpansion @@ -38,25 +38,17 @@ type PodSchedulingContextLister interface { // podSchedulingContextLister implements the PodSchedulingContextLister interface. type podSchedulingContextLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha3.PodSchedulingContext] } // NewPodSchedulingContextLister returns a new PodSchedulingContextLister. func NewPodSchedulingContextLister(indexer cache.Indexer) PodSchedulingContextLister { - return &podSchedulingContextLister{indexer: indexer} -} - -// List lists all PodSchedulingContexts in the indexer. -func (s *podSchedulingContextLister) List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.PodSchedulingContext)) - }) - return ret, err + return &podSchedulingContextLister{listers.New[*v1alpha3.PodSchedulingContext](indexer, v1alpha3.Resource("podschedulingcontext"))} } // PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. func (s *podSchedulingContextLister) PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister { - return podSchedulingContextNamespaceLister{indexer: s.indexer, namespace: namespace} + return podSchedulingContextNamespaceLister{listers.NewNamespaced[*v1alpha3.PodSchedulingContext](s.ResourceIndexer, namespace)} } // PodSchedulingContextNamespaceLister helps list and get PodSchedulingContexts. @@ -64,36 +56,15 @@ func (s *podSchedulingContextLister) PodSchedulingContexts(namespace string) Pod type PodSchedulingContextNamespaceLister interface { // List lists all PodSchedulingContexts in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) + List(selector labels.Selector) (ret []*v1alpha3.PodSchedulingContext, err error) // Get retrieves the PodSchedulingContext from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.PodSchedulingContext, error) + Get(name string) (*v1alpha3.PodSchedulingContext, error) PodSchedulingContextNamespaceListerExpansion } // podSchedulingContextNamespaceLister implements the PodSchedulingContextNamespaceLister // interface. type podSchedulingContextNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodSchedulingContexts in the indexer for a given namespace. 
-func (s podSchedulingContextNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.PodSchedulingContext)) - }) - return ret, err -} - -// Get retrieves the PodSchedulingContext from the indexer for a given namespace and name. -func (s podSchedulingContextNamespaceLister) Get(name string) (*v1alpha2.PodSchedulingContext, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("podschedulingcontext"), name) - } - return obj.(*v1alpha2.PodSchedulingContext), nil + listers.ResourceIndexer[*v1alpha3.PodSchedulingContext] } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go similarity index 58% rename from vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go rename to vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go index 273f16af31..ac6a3e1564 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go @@ -16,12 +16,12 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" + v1alpha3 "k8s.io/api/resource/v1alpha3" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -30,7 +30,7 @@ import ( type ResourceClaimLister interface { // List lists all ResourceClaims in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) + List(selector labels.Selector) (ret []*v1alpha3.ResourceClaim, err error) // ResourceClaims returns an object that can list and get ResourceClaims. ResourceClaims(namespace string) ResourceClaimNamespaceLister ResourceClaimListerExpansion @@ -38,25 +38,17 @@ type ResourceClaimLister interface { // resourceClaimLister implements the ResourceClaimLister interface. type resourceClaimLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha3.ResourceClaim] } // NewResourceClaimLister returns a new ResourceClaimLister. func NewResourceClaimLister(indexer cache.Indexer) ResourceClaimLister { - return &resourceClaimLister{indexer: indexer} -} - -// List lists all ResourceClaims in the indexer. -func (s *resourceClaimLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaim)) - }) - return ret, err + return &resourceClaimLister{listers.New[*v1alpha3.ResourceClaim](indexer, v1alpha3.Resource("resourceclaim"))} } // ResourceClaims returns an object that can list and get ResourceClaims. func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimNamespaceLister { - return resourceClaimNamespaceLister{indexer: s.indexer, namespace: namespace} + return resourceClaimNamespaceLister{listers.NewNamespaced[*v1alpha3.ResourceClaim](s.ResourceIndexer, namespace)} } // ResourceClaimNamespaceLister helps list and get ResourceClaims. 
@@ -64,36 +56,15 @@ func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimName type ResourceClaimNamespaceLister interface { // List lists all ResourceClaims in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) + List(selector labels.Selector) (ret []*v1alpha3.ResourceClaim, err error) // Get retrieves the ResourceClaim from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClaim, error) + Get(name string) (*v1alpha3.ResourceClaim, error) ResourceClaimNamespaceListerExpansion } // resourceClaimNamespaceLister implements the ResourceClaimNamespaceLister // interface. type resourceClaimNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceClaims in the indexer for a given namespace. -func (s resourceClaimNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaim)) - }) - return ret, err -} - -// Get retrieves the ResourceClaim from the indexer for a given namespace and name. -func (s resourceClaimNamespaceLister) Get(name string) (*v1alpha2.ResourceClaim, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaim"), name) - } - return obj.(*v1alpha2.ResourceClaim), nil + listers.ResourceIndexer[*v1alpha3.ResourceClaim] } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go similarity index 59% rename from vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go index 91a488b174..6c15f82bba 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go @@ -16,12 +16,12 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" + v1alpha3 "k8s.io/api/resource/v1alpha3" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -30,7 +30,7 @@ import ( type ResourceClaimTemplateLister interface { // List lists all ResourceClaimTemplates in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) + List(selector labels.Selector) (ret []*v1alpha3.ResourceClaimTemplate, err error) // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister ResourceClaimTemplateListerExpansion @@ -38,25 +38,17 @@ type ResourceClaimTemplateLister interface { // resourceClaimTemplateLister implements the ResourceClaimTemplateLister interface. type resourceClaimTemplateLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha3.ResourceClaimTemplate] } // NewResourceClaimTemplateLister returns a new ResourceClaimTemplateLister. 
func NewResourceClaimTemplateLister(indexer cache.Indexer) ResourceClaimTemplateLister { - return &resourceClaimTemplateLister{indexer: indexer} -} - -// List lists all ResourceClaimTemplates in the indexer. -func (s *resourceClaimTemplateLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaimTemplate)) - }) - return ret, err + return &resourceClaimTemplateLister{listers.New[*v1alpha3.ResourceClaimTemplate](indexer, v1alpha3.Resource("resourceclaimtemplate"))} } // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister { - return resourceClaimTemplateNamespaceLister{indexer: s.indexer, namespace: namespace} + return resourceClaimTemplateNamespaceLister{listers.NewNamespaced[*v1alpha3.ResourceClaimTemplate](s.ResourceIndexer, namespace)} } // ResourceClaimTemplateNamespaceLister helps list and get ResourceClaimTemplates. @@ -64,36 +56,15 @@ func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) R type ResourceClaimTemplateNamespaceLister interface { // List lists all ResourceClaimTemplates in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) + List(selector labels.Selector) (ret []*v1alpha3.ResourceClaimTemplate, err error) // Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClaimTemplate, error) + Get(name string) (*v1alpha3.ResourceClaimTemplate, error) ResourceClaimTemplateNamespaceListerExpansion } // resourceClaimTemplateNamespaceLister implements the ResourceClaimTemplateNamespaceLister // interface. type resourceClaimTemplateNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceClaimTemplates in the indexer for a given namespace. -func (s resourceClaimTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaimTemplate)) - }) - return ret, err -} - -// Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. 
-func (s resourceClaimTemplateNamespaceLister) Get(name string) (*v1alpha2.ResourceClaimTemplate, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaimtemplate"), name) - } - return obj.(*v1alpha2.ResourceClaimTemplate), nil + listers.ResourceIndexer[*v1alpha3.ResourceClaimTemplate] } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceslice.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go similarity index 58% rename from vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceslice.go rename to vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go index 4301cea2e3..ae87b8b66d 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceslice.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go @@ -16,12 +16,12 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" + v1alpha3 "k8s.io/api/resource/v1alpha3" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -30,39 +30,19 @@ import ( type ResourceSliceLister interface { // List lists all ResourceSlices in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceSlice, err error) + List(selector labels.Selector) (ret []*v1alpha3.ResourceSlice, err error) // Get retrieves the ResourceSlice from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceSlice, error) + Get(name string) (*v1alpha3.ResourceSlice, error) ResourceSliceListerExpansion } // resourceSliceLister implements the ResourceSliceLister interface. type resourceSliceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha3.ResourceSlice] } // NewResourceSliceLister returns a new ResourceSliceLister. func NewResourceSliceLister(indexer cache.Indexer) ResourceSliceLister { - return &resourceSliceLister{indexer: indexer} -} - -// List lists all ResourceSlices in the indexer. -func (s *resourceSliceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceSlice, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceSlice)) - }) - return ret, err -} - -// Get retrieves the ResourceSlice from the index for a given name. 
-func (s *resourceSliceLister) Get(name string) (*v1alpha2.ResourceSlice, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceslice"), name) - } - return obj.(*v1alpha2.ResourceSlice), nil + return &resourceSliceLister{listers.New[*v1alpha3.ResourceSlice](indexer, v1alpha3.Resource("resourceslice"))} } diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go index 4da84ccf8a..b9179b5685 100644 --- a/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityClassLister interface { // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{indexer: indexer} -} - -// List lists all PriorityClasses in the indexer. -func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1.PriorityClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PriorityClass)) - }) - return ret, err -} - -// Get retrieves the PriorityClass from the index for a given name. -func (s *priorityClassLister) Get(name string) (*v1.PriorityClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("priorityclass"), name) - } - return obj.(*v1.PriorityClass), nil + return &priorityClassLister{listers.New[*v1.PriorityClass](indexer, v1.Resource("priorityclass"))} } diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go index 3d25dc80af..776ad5ae25 100644 --- a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/scheduling/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityClassLister interface { // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{indexer: indexer} -} - -// List lists all PriorityClasses in the indexer. -func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1alpha1.PriorityClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PriorityClass)) - }) - return ret, err -} - -// Get retrieves the PriorityClass from the index for a given name. 
-func (s *priorityClassLister) Get(name string) (*v1alpha1.PriorityClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("priorityclass"), name) - } - return obj.(*v1alpha1.PriorityClass), nil + return &priorityClassLister{listers.New[*v1alpha1.PriorityClass](indexer, v1alpha1.Resource("priorityclass"))} } diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go index c848d035af..966064e5d6 100644 --- a/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/scheduling/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityClassLister interface { // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{indexer: indexer} -} - -// List lists all PriorityClasses in the indexer. -func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1beta1.PriorityClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PriorityClass)) - }) - return ret, err -} - -// Get retrieves the PriorityClass from the index for a given name. -func (s *priorityClassLister) Get(name string) (*v1beta1.PriorityClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("priorityclass"), name) - } - return obj.(*v1beta1.PriorityClass), nil + return &priorityClassLister{listers.New[*v1beta1.PriorityClass](indexer, v1beta1.Resource("priorityclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go b/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go index 4e8ab90900..db64f45887 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CSIDriverLister interface { // cSIDriverLister implements the CSIDriverLister interface. type cSIDriverLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.CSIDriver] } // NewCSIDriverLister returns a new CSIDriverLister. func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister { - return &cSIDriverLister{indexer: indexer} -} - -// List lists all CSIDrivers in the indexer. -func (s *cSIDriverLister) List(selector labels.Selector) (ret []*v1.CSIDriver, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSIDriver)) - }) - return ret, err -} - -// Get retrieves the CSIDriver from the index for a given name. 
-func (s *cSIDriverLister) Get(name string) (*v1.CSIDriver, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("csidriver"), name) - } - return obj.(*v1.CSIDriver), nil + return &cSIDriverLister{listers.New[*v1.CSIDriver](indexer, v1.Resource("csidriver"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csinode.go b/vendor/k8s.io/client-go/listers/storage/v1/csinode.go index 93f869572c..5bfd2a43ae 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/csinode.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CSINodeLister interface { // cSINodeLister implements the CSINodeLister interface. type cSINodeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.CSINode] } // NewCSINodeLister returns a new CSINodeLister. func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { - return &cSINodeLister{indexer: indexer} -} - -// List lists all CSINodes in the indexer. -func (s *cSINodeLister) List(selector labels.Selector) (ret []*v1.CSINode, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSINode)) - }) - return ret, err -} - -// Get retrieves the CSINode from the index for a given name. -func (s *cSINodeLister) Get(name string) (*v1.CSINode, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("csinode"), name) - } - return obj.(*v1.CSINode), nil + return &cSINodeLister{listers.New[*v1.CSINode](indexer, v1.Resource("csinode"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go index a72328c9a3..c2acfa1153 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{indexer: indexer} -} - -// List lists all CSIStorageCapacities in the indexer. -func (s *cSIStorageCapacityLister) List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSIStorageCapacity)) - }) - return ret, err + return &cSIStorageCapacityLister{listers.New[*v1.CSIStorageCapacity](indexer, v1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. 
func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{indexer: s.indexer, namespace: namespace} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -74,26 +66,5 @@ type CSIStorageCapacityNamespaceLister interface { // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CSIStorageCapacities in the indexer for a given namespace. -func (s cSIStorageCapacityNamespaceLister) List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSIStorageCapacity)) - }) - return ret, err -} - -// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. -func (s cSIStorageCapacityNamespaceLister) Get(name string) (*v1.CSIStorageCapacity, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("csistoragecapacity"), name) - } - return obj.(*v1.CSIStorageCapacity), nil + listers.ResourceIndexer[*v1.CSIStorageCapacity] } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go index ffa3d19f50..fc37594446 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type StorageClassLister interface { // storageClassLister implements the StorageClassLister interface. type storageClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.StorageClass] } // NewStorageClassLister returns a new StorageClassLister. func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { - return &storageClassLister{indexer: indexer} -} - -// List lists all StorageClasses in the indexer. -func (s *storageClassLister) List(selector labels.Selector) (ret []*v1.StorageClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.StorageClass)) - }) - return ret, err -} - -// Get retrieves the StorageClass from the index for a given name. 
-func (s *storageClassLister) Get(name string) (*v1.StorageClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("storageclass"), name) - } - return obj.(*v1.StorageClass), nil + return &storageClassLister{listers.New[*v1.StorageClass](indexer, v1.Resource("storageclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go index fbc735c939..44754d6f25 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type VolumeAttachmentLister interface { // volumeAttachmentLister implements the VolumeAttachmentLister interface. type volumeAttachmentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{indexer: indexer} -} - -// List lists all VolumeAttachments in the indexer. -func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.VolumeAttachment)) - }) - return ret, err -} - -// Get retrieves the VolumeAttachment from the index for a given name. -func (s *volumeAttachmentLister) Get(name string) (*v1.VolumeAttachment, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("volumeattachment"), name) - } - return obj.(*v1.VolumeAttachment), nil + return &volumeAttachmentLister{listers.New[*v1.VolumeAttachment](indexer, v1.Resource("volumeattachment"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go index 0c1b5f2647..7f75aae2cd 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{indexer: indexer} -} - -// List lists all CSIStorageCapacities in the indexer. 
-func (s *cSIStorageCapacityLister) List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.CSIStorageCapacity)) - }) - return ret, err + return &cSIStorageCapacityLister{listers.New[*v1alpha1.CSIStorageCapacity](indexer, v1alpha1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{indexer: s.indexer, namespace: namespace} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1alpha1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -74,26 +66,5 @@ type CSIStorageCapacityNamespaceLister interface { // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CSIStorageCapacities in the indexer for a given namespace. -func (s cSIStorageCapacityNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.CSIStorageCapacity)) - }) - return ret, err -} - -// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. -func (s cSIStorageCapacityNamespaceLister) Get(name string) (*v1alpha1.CSIStorageCapacity, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("csistoragecapacity"), name) - } - return obj.(*v1alpha1.CSIStorageCapacity), nil + listers.ResourceIndexer[*v1alpha1.CSIStorageCapacity] } diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go index 3d5e2b7b71..122864ffef 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type VolumeAttachmentLister interface { // volumeAttachmentLister implements the VolumeAttachmentLister interface. type volumeAttachmentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{indexer: indexer} -} - -// List lists all VolumeAttachments in the indexer. -func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.VolumeAttachment)) - }) - return ret, err -} - -// Get retrieves the VolumeAttachment from the index for a given name. 
-func (s *volumeAttachmentLister) Get(name string) (*v1alpha1.VolumeAttachment, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("volumeattachment"), name) - } - return obj.(*v1alpha1.VolumeAttachment), nil + return &volumeAttachmentLister{listers.New[*v1alpha1.VolumeAttachment](indexer, v1alpha1.Resource("volumeattachment"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go index f30b4a89ba..5d8ae09d7c 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type VolumeAttributesClassLister interface { // volumeAttributesClassLister implements the VolumeAttributesClassLister interface. type volumeAttributesClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.VolumeAttributesClass] } // NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister. func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister { - return &volumeAttributesClassLister{indexer: indexer} -} - -// List lists all VolumeAttributesClasses in the indexer. -func (s *volumeAttributesClassLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeAttributesClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.VolumeAttributesClass)) - }) - return ret, err -} - -// Get retrieves the VolumeAttributesClass from the index for a given name. -func (s *volumeAttributesClassLister) Get(name string) (*v1alpha1.VolumeAttributesClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("volumeattributesclass"), name) - } - return obj.(*v1alpha1.VolumeAttributesClass), nil + return &volumeAttributesClassLister{listers.New[*v1alpha1.VolumeAttributesClass](indexer, v1alpha1.Resource("volumeattributesclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go index c6787aa01b..6600386749 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CSIDriverLister interface { // cSIDriverLister implements the CSIDriverLister interface. type cSIDriverLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.CSIDriver] } // NewCSIDriverLister returns a new CSIDriverLister. func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister { - return &cSIDriverLister{indexer: indexer} -} - -// List lists all CSIDrivers in the indexer. 
-func (s *cSIDriverLister) List(selector labels.Selector) (ret []*v1beta1.CSIDriver, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSIDriver)) - }) - return ret, err -} - -// Get retrieves the CSIDriver from the index for a given name. -func (s *cSIDriverLister) Get(name string) (*v1beta1.CSIDriver, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("csidriver"), name) - } - return obj.(*v1beta1.CSIDriver), nil + return &cSIDriverLister{listers.New[*v1beta1.CSIDriver](indexer, v1beta1.Resource("csidriver"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go index 809efaa369..2c29ccabf3 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CSINodeLister interface { // cSINodeLister implements the CSINodeLister interface. type cSINodeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.CSINode] } // NewCSINodeLister returns a new CSINodeLister. func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { - return &cSINodeLister{indexer: indexer} -} - -// List lists all CSINodes in the indexer. -func (s *cSINodeLister) List(selector labels.Selector) (ret []*v1beta1.CSINode, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSINode)) - }) - return ret, err -} - -// Get retrieves the CSINode from the index for a given name. -func (s *cSINodeLister) Get(name string) (*v1beta1.CSINode, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("csinode"), name) - } - return obj.(*v1beta1.CSINode), nil + return &cSINodeLister{listers.New[*v1beta1.CSINode](indexer, v1beta1.Resource("csinode"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go index 4680ffb7c8..365304df12 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{indexer: indexer} -} - -// List lists all CSIStorageCapacities in the indexer. 
-func (s *cSIStorageCapacityLister) List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSIStorageCapacity)) - }) - return ret, err + return &cSIStorageCapacityLister{listers.New[*v1beta1.CSIStorageCapacity](indexer, v1beta1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{indexer: s.indexer, namespace: namespace} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1beta1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -74,26 +66,5 @@ type CSIStorageCapacityNamespaceLister interface { // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CSIStorageCapacities in the indexer for a given namespace. -func (s cSIStorageCapacityNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSIStorageCapacity)) - }) - return ret, err -} - -// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. -func (s cSIStorageCapacityNamespaceLister) Get(name string) (*v1beta1.CSIStorageCapacity, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("csistoragecapacity"), name) - } - return obj.(*v1beta1.CSIStorageCapacity), nil + listers.ResourceIndexer[*v1beta1.CSIStorageCapacity] } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go index c2b0d5b17d..4f56776be1 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go @@ -41,3 +41,7 @@ type StorageClassListerExpansion interface{} // VolumeAttachmentListerExpansion allows custom methods to be added to // VolumeAttachmentLister. type VolumeAttachmentListerExpansion interface{} + +// VolumeAttributesClassListerExpansion allows custom methods to be added to +// VolumeAttributesClassLister. +type VolumeAttributesClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go index eb7b8315c6..070c061bc5 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type StorageClassLister interface { // storageClassLister implements the StorageClassLister interface. 
type storageClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.StorageClass] } // NewStorageClassLister returns a new StorageClassLister. func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { - return &storageClassLister{indexer: indexer} -} - -// List lists all StorageClasses in the indexer. -func (s *storageClassLister) List(selector labels.Selector) (ret []*v1beta1.StorageClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.StorageClass)) - }) - return ret, err -} - -// Get retrieves the StorageClass from the index for a given name. -func (s *storageClassLister) Get(name string) (*v1beta1.StorageClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("storageclass"), name) - } - return obj.(*v1beta1.StorageClass), nil + return &storageClassLister{listers.New[*v1beta1.StorageClass](indexer, v1beta1.Resource("storageclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go index bab2d317c7..d437c1eaeb 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type VolumeAttachmentLister interface { // volumeAttachmentLister implements the VolumeAttachmentLister interface. type volumeAttachmentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{indexer: indexer} -} - -// List lists all VolumeAttachments in the indexer. -func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1beta1.VolumeAttachment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.VolumeAttachment)) - }) - return ret, err -} - -// Get retrieves the VolumeAttachment from the index for a given name. -func (s *volumeAttachmentLister) Get(name string) (*v1beta1.VolumeAttachment, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("volumeattachment"), name) - } - return obj.(*v1beta1.VolumeAttachment), nil + return &volumeAttachmentLister{listers.New[*v1beta1.VolumeAttachment](indexer, v1beta1.Resource("volumeattachment"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 0000000000..2ff71e3d7f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// VolumeAttributesClassLister helps list VolumeAttributesClasses. +// All objects returned here must be treated as read-only. +type VolumeAttributesClassLister interface { + // List lists all VolumeAttributesClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.VolumeAttributesClass, err error) + // Get retrieves the VolumeAttributesClass from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.VolumeAttributesClass, error) + VolumeAttributesClassListerExpansion +} + +// volumeAttributesClassLister implements the VolumeAttributesClassLister interface. +type volumeAttributesClassLister struct { + listers.ResourceIndexer[*v1beta1.VolumeAttributesClass] +} + +// NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister. +func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister { + return &volumeAttributesClassLister{listers.New[*v1beta1.VolumeAttributesClass](indexer, v1beta1.Resource("volumeattributesclass"))} +} diff --git a/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go index b65bf25324..794dba25c8 100644 --- a/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go +++ b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/storagemigration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type StorageVersionMigrationLister interface { // storageVersionMigrationLister implements the StorageVersionMigrationLister interface. type storageVersionMigrationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.StorageVersionMigration] } // NewStorageVersionMigrationLister returns a new StorageVersionMigrationLister. func NewStorageVersionMigrationLister(indexer cache.Indexer) StorageVersionMigrationLister { - return &storageVersionMigrationLister{indexer: indexer} -} - -// List lists all StorageVersionMigrations in the indexer. -func (s *storageVersionMigrationLister) List(selector labels.Selector) (ret []*v1alpha1.StorageVersionMigration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.StorageVersionMigration)) - }) - return ret, err -} - -// Get retrieves the StorageVersionMigration from the index for a given name. 
-func (s *storageVersionMigrationLister) Get(name string) (*v1alpha1.StorageVersionMigration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("storageversionmigration"), name) - } - return obj.(*v1alpha1.StorageVersionMigration), nil + return &storageVersionMigrationLister{listers.New[*v1alpha1.StorageVersionMigration](indexer, v1alpha1.Resource("storageversionmigration"))} } diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go index 3f0688774e..2d178ce374 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go @@ -1,6 +1,3 @@ -//go:build !providerless -// +build !providerless - /* Copyright 2016 The Kubernetes Authors. diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 850e57daeb..f5a9f68ca4 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -37,12 +37,15 @@ import ( "golang.org/x/net/http2" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/streaming" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/watch" + clientfeatures "k8s.io/client-go/features" restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" @@ -768,6 +771,142 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { } } +type WatchListResult struct { + // err holds any errors we might have received + // during streaming. + err error + + // items hold the collected data + items []runtime.Object + + // initialEventsEndBookmarkRV holds the resource version + // extracted from the bookmark event that marks + // the end of the stream. 
+ initialEventsEndBookmarkRV string + + // gv represents the API version + // it is used to construct the final list response + // normally this information is filled by the server + gv schema.GroupVersion +} + +func (r WatchListResult) Into(obj runtime.Object) error { + if r.err != nil { + return r.err + } + + listPtr, err := meta.GetItemsPtr(obj) + if err != nil { + return err + } + listVal, err := conversion.EnforcePtr(listPtr) + if err != nil { + return err + } + if listVal.Kind() != reflect.Slice { + return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) + } + + if len(r.items) == 0 { + listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0)) + } else { + listVal.Set(reflect.MakeSlice(listVal.Type(), len(r.items), len(r.items))) + for i, o := range r.items { + if listVal.Type().Elem() != reflect.TypeOf(o).Elem() { + return fmt.Errorf("received object type = %v at index = %d, doesn't match the list item type = %v", reflect.TypeOf(o).Elem(), i, listVal.Type().Elem()) + } + listVal.Index(i).Set(reflect.ValueOf(o).Elem()) + } + } + + listMeta, err := meta.ListAccessor(obj) + if err != nil { + return err + } + listMeta.SetResourceVersion(r.initialEventsEndBookmarkRV) + + typeMeta, err := meta.TypeAccessor(obj) + if err != nil { + return err + } + version := r.gv.String() + typeMeta.SetAPIVersion(version) + typeMeta.SetKind(reflect.TypeOf(obj).Elem().Name()) + + return nil +} + +// WatchList establishes a stream to get a consistent snapshot of data +// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal +// +// Note that the watchlist requires properly setting the ListOptions +// otherwise it just establishes a regular watch with the server. +// Check the documentation https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists +// to see what parameters are currently required. +func (r *Request) WatchList(ctx context.Context) WatchListResult { + if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) { + return WatchListResult{err: fmt.Errorf("%q feature gate is not enabled", clientfeatures.WatchListClient)} + } + // TODO(#115478): consider validating request parameters (i.e sendInitialEvents). + // Most users use the generated client, which handles the proper setting of parameters. + // We don't have validation for other methods (e.g., the Watch) + // thus, for symmetry, we haven't added additional checks for the WatchList method. + w, err := r.Watch(ctx) + if err != nil { + return WatchListResult{err: err} + } + return r.handleWatchList(ctx, w) +} + +// handleWatchList holds the actual logic for easier unit testing. +// Note that this function will close the passed watch. +func (r *Request) handleWatchList(ctx context.Context, w watch.Interface) WatchListResult { + defer w.Stop() + var lastKey string + var items []runtime.Object + + for { + select { + case <-ctx.Done(): + return WatchListResult{err: ctx.Err()} + case event, ok := <-w.ResultChan(): + if !ok { + return WatchListResult{err: fmt.Errorf("unexpected watch close")} + } + if event.Type == watch.Error { + return WatchListResult{err: errors.FromObject(event.Object)} + } + meta, err := meta.Accessor(event.Object) + if err != nil { + return WatchListResult{err: fmt.Errorf("failed to parse watch event: %#v", event)} + } + + switch event.Type { + case watch.Added: + // the following check ensures that the response is ordered. + // earlier servers had a bug that caused them to not sort the output. 
+ // in such cases, return an error which can trigger fallback logic. + key := objectKeyFromMeta(meta) + if len(lastKey) > 0 && lastKey > key { + return WatchListResult{err: fmt.Errorf("cannot add the obj (%#v) with the key = %s, as it violates the ordering guarantees provided by the watchlist feature in beta phase, lastInsertedKey was = %s", event.Object, key, lastKey)} + } + items = append(items, event.Object) + lastKey = key + case watch.Bookmark: + if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" { + return WatchListResult{ + items: items, + initialEventsEndBookmarkRV: meta.GetResourceVersion(), + gv: r.c.content.GroupVersion, + } + } + default: + return WatchListResult{err: fmt.Errorf("unexpected watch event %#v, expected to only receive watch.Added and watch.Bookmark events", event)} + } + } + } +} + func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) { contentType := resp.Header.Get("Content-Type") mediaType, params, err := mime.ParseMediaType(contentType) @@ -1470,3 +1609,10 @@ func ValidatePathSegmentName(name string, prefix bool) []string { } return IsValidPathSegmentName(name) } + +func objectKeyFromMeta(objMeta metav1.Object) string { + if len(objMeta.GetNamespace()) > 0 { + return fmt.Sprintf("%s/%s", objMeta.GetNamespace(), objMeta.GetName()) + } + return objMeta.GetName() +} diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go index e95c020b2e..9e1e04d14e 100644 --- a/vendor/k8s.io/client-go/rest/watch/decoder.go +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -51,7 +51,7 @@ func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { return "", nil, err } if res != &got { - return "", nil, fmt.Errorf("unable to decode to metav1.Event") + return "", nil, fmt.Errorf("unable to decode to metav1.WatchEvent") } switch got.Type { case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark): diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go index c8ae0aaf5d..270cc4ddbd 100644 --- a/vendor/k8s.io/client-go/testing/actions.go +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -30,41 +30,61 @@ import ( ) func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { + return NewRootGetActionWithOptions(resource, name, metav1.GetOptions{}) +} + +func NewRootGetActionWithOptions(resource schema.GroupVersionResource, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Name = name + action.GetOptions = opts return action } func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl { + return NewGetActionWithOptions(resource, namespace, name, metav1.GetOptions{}) +} + +func NewGetActionWithOptions(resource schema.GroupVersionResource, namespace, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Namespace = namespace action.Name = name + action.GetOptions = opts return action } func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl { + return NewGetSubresourceActionWithOptions(resource, namespace, subresource, name, metav1.GetOptions{}) +} + +func NewGetSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, subresource, name string, opts metav1.GetOptions) 
GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Name = name + action.GetOptions = opts return action } func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl { + return NewRootGetSubresourceActionWithOptions(resource, subresource, name, metav1.GetOptions{}) +} + +func NewRootGetSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Subresource = subresource action.Name = name + action.GetOptions = opts return action } @@ -76,6 +96,21 @@ func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVe action.Kind = kind labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()} + + return action +} + +func NewRootListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts metav1.ListOptions) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.ListOptions = opts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()} return action } @@ -86,6 +121,21 @@ func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersio action.Resource = resource action.Kind = kind action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()} + + return action +} + +func NewListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts metav1.ListOptions) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.Namespace = namespace + action.ListOptions = opts + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} @@ -93,36 +143,55 @@ func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersio } func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl { + return NewRootCreateActionWithOptions(resource, object, metav1.CreateOptions{}) +} + +func NewRootCreateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Object = object + action.CreateOptions = opts return action } func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { + return NewCreateActionWithOptions(resource, namespace, object, metav1.CreateOptions{}) +} + +func NewCreateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts 
metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Namespace = namespace action.Object = object + action.CreateOptions = opts return action } func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl { + return NewRootCreateSubresourceActionWithOptions(resource, name, subresource, object, metav1.CreateOptions{}) +} + +func NewRootCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Subresource = subresource action.Name = name action.Object = object + action.CreateOptions = opts return action } func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl { + return NewCreateSubresourceActionWithOptions(resource, name, subresource, namespace, object, metav1.CreateOptions{}) +} + +func NewCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource @@ -130,41 +199,61 @@ func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subr action.Subresource = subresource action.Name = name action.Object = object + action.CreateOptions = opts return action } func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl { + return NewRootUpdateActionWithOptions(resource, object, metav1.UpdateOptions{}) +} + +func NewRootUpdateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Object = object + action.UpdateOptions = opts return action } func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { + return NewUpdateActionWithOptions(resource, namespace, object, metav1.UpdateOptions{}) +} + +func NewUpdateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Namespace = namespace action.Object = object + action.UpdateOptions = opts return action } func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl { + return NewRootPatchActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{}) +} + +func NewRootPatchActionWithOptions(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl { + return NewPatchActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{}) +} + +func NewPatchActionWithOptions(resource schema.GroupVersionResource, namespace string, 
name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -172,11 +261,16 @@ func NewPatchAction(resource schema.GroupVersionResource, namespace string, name action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + return NewRootPatchSubresourceActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{}, subresources...) +} + +func NewRootPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -184,11 +278,16 @@ func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name st action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + return NewPatchSubresourceActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{}, subresources...) +} + +func NewPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -197,26 +296,38 @@ func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { + return NewRootUpdateSubresourceActionWithOptions(resource, subresource, object, metav1.UpdateOptions{}) +} + +func NewRootUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Subresource = subresource action.Object = object + action.UpdateOptions = opts return action } + func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { + return NewUpdateSubresourceActionWithOptions(resource, subresource, namespace, object, metav1.UpdateOptions{}) +} + +func NewUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Object = object + action.UpdateOptions = opts return action } @@ -236,11 +347,16 @@ func NewRootDeleteActionWithOptions(resource schema.GroupVersionResource, name s } func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl { + return NewRootDeleteSubresourceActionWithOptions(resource, subresource, name, metav1.DeleteOptions{}) +} + 
+func NewRootDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, name string, opts metav1.DeleteOptions) DeleteActionImpl { action := DeleteActionImpl{} action.Verb = "delete" action.Resource = resource action.Subresource = subresource action.Name = name + action.DeleteOptions = opts return action } @@ -261,41 +377,69 @@ func NewDeleteActionWithOptions(resource schema.GroupVersionResource, namespace, } func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl { + return NewDeleteSubresourceActionWithOptions(resource, subresource, namespace, name, metav1.DeleteOptions{}) +} + +func NewDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, namespace, name string, opts metav1.DeleteOptions) DeleteActionImpl { action := DeleteActionImpl{} action.Verb = "delete" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Name = name + action.DeleteOptions = opts return action } func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewRootDeleteCollectionActionWithOptions(resource, metav1.DeleteOptions{}, listOpts) +} + +func NewRootDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, deleteOpts metav1.DeleteOptions, listOpts metav1.ListOptions) DeleteCollectionActionImpl { action := DeleteCollectionActionImpl{} action.Verb = "delete-collection" action.Resource = resource - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.DeleteOptions = deleteOpts + action.ListOptions = listOpts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} return action } func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewDeleteCollectionActionWithOptions(resource, namespace, metav1.DeleteOptions{}, listOpts) +} + +func NewDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, namespace string, deleteOpts metav1.DeleteOptions, listOpts metav1.ListOptions) DeleteCollectionActionImpl { action := DeleteCollectionActionImpl{} action.Verb = "delete-collection" action.Resource = resource action.Namespace = namespace - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.DeleteOptions = deleteOpts + action.ListOptions = listOpts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} return action } func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewRootWatchActionWithOptions(resource, listOpts) +} + +func NewRootWatchActionWithOptions(resource schema.GroupVersionResource, opts metav1.ListOptions) WatchActionImpl { action := WatchActionImpl{} action.Verb = "watch" action.Resource = resource + action.ListOptions = opts + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} @@ -328,10 +472,17 @@ func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fi } func NewWatchAction(resource schema.GroupVersionResource, 
namespace string, opts interface{}) WatchActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewWatchActionWithOptions(resource, namespace, listOpts) +} + +func NewWatchActionWithOptions(resource schema.GroupVersionResource, namespace string, opts metav1.ListOptions) WatchActionImpl { action := WatchActionImpl{} action.Verb = "watch" action.Resource = resource action.Namespace = namespace + action.ListOptions = opts + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} @@ -487,17 +638,23 @@ func (a GenericActionImpl) DeepCopy() Action { type GetActionImpl struct { ActionImpl - Name string + Name string + GetOptions metav1.GetOptions } func (a GetActionImpl) GetName() string { return a.Name } +func (a GetActionImpl) GetGetOptions() metav1.GetOptions { + return a.GetOptions +} + func (a GetActionImpl) DeepCopy() Action { return GetActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), Name: a.Name, + GetOptions: *a.GetOptions.DeepCopy(), } } @@ -506,6 +663,7 @@ type ListActionImpl struct { Kind schema.GroupVersionKind Name string ListRestrictions ListRestrictions + ListOptions metav1.ListOptions } func (a ListActionImpl) GetKind() schema.GroupVersionKind { @@ -516,6 +674,10 @@ func (a ListActionImpl) GetListRestrictions() ListRestrictions { return a.ListRestrictions } +func (a ListActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a ListActionImpl) DeepCopy() Action { return ListActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -525,48 +687,62 @@ func (a ListActionImpl) DeepCopy() Action { Labels: a.ListRestrictions.Labels.DeepCopySelector(), Fields: a.ListRestrictions.Fields.DeepCopySelector(), }, + ListOptions: *a.ListOptions.DeepCopy(), } } type CreateActionImpl struct { ActionImpl - Name string - Object runtime.Object + Name string + Object runtime.Object + CreateOptions metav1.CreateOptions } func (a CreateActionImpl) GetObject() runtime.Object { return a.Object } +func (a CreateActionImpl) GetCreateOptions() metav1.CreateOptions { + return a.CreateOptions +} + func (a CreateActionImpl) DeepCopy() Action { return CreateActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Name: a.Name, - Object: a.Object.DeepCopyObject(), + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + Object: a.Object.DeepCopyObject(), + CreateOptions: *a.CreateOptions.DeepCopy(), } } type UpdateActionImpl struct { ActionImpl - Object runtime.Object + Object runtime.Object + UpdateOptions metav1.UpdateOptions } func (a UpdateActionImpl) GetObject() runtime.Object { return a.Object } +func (a UpdateActionImpl) GetUpdateOptions() metav1.UpdateOptions { + return a.UpdateOptions +} + func (a UpdateActionImpl) DeepCopy() Action { return UpdateActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Object: a.Object.DeepCopyObject(), + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Object: a.Object.DeepCopyObject(), + UpdateOptions: *a.UpdateOptions.DeepCopy(), } } type PatchActionImpl struct { ActionImpl - Name string - PatchType types.PatchType - Patch []byte + Name string + PatchType types.PatchType + Patch []byte + PatchOptions metav1.PatchOptions } func (a PatchActionImpl) GetName() string { @@ -581,14 +757,19 @@ func (a PatchActionImpl) GetPatchType() types.PatchType { return a.PatchType } +func (a PatchActionImpl) GetPatchOptions() metav1.PatchOptions { + return a.PatchOptions +} + func 
(a PatchActionImpl) DeepCopy() Action { patch := make([]byte, len(a.Patch)) copy(patch, a.Patch) return PatchActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Name: a.Name, - PatchType: a.PatchType, - Patch: patch, + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + PatchType: a.PatchType, + Patch: patch, + PatchOptions: *a.PatchOptions.DeepCopy(), } } @@ -617,12 +798,22 @@ func (a DeleteActionImpl) DeepCopy() Action { type DeleteCollectionActionImpl struct { ActionImpl ListRestrictions ListRestrictions + DeleteOptions metav1.DeleteOptions + ListOptions metav1.ListOptions } func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { return a.ListRestrictions } +func (a DeleteCollectionActionImpl) GetDeleteOptions() metav1.DeleteOptions { + return a.DeleteOptions +} + +func (a DeleteCollectionActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a DeleteCollectionActionImpl) DeepCopy() Action { return DeleteCollectionActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -630,18 +821,25 @@ func (a DeleteCollectionActionImpl) DeepCopy() Action { Labels: a.ListRestrictions.Labels.DeepCopySelector(), Fields: a.ListRestrictions.Fields.DeepCopySelector(), }, + DeleteOptions: *a.DeleteOptions.DeepCopy(), + ListOptions: *a.ListOptions.DeepCopy(), } } type WatchActionImpl struct { ActionImpl WatchRestrictions WatchRestrictions + ListOptions metav1.ListOptions } func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { return a.WatchRestrictions } +func (a WatchActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a WatchActionImpl) DeepCopy() Action { return WatchActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -650,6 +848,7 @@ func (a WatchActionImpl) DeepCopy() Action { Fields: a.WatchRestrictions.Fields.DeepCopySelector(), ResourceVersion: a.WatchRestrictions.ResourceVersion, }, + ListOptions: *a.ListOptions.DeepCopy(), } } diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go index 396840670f..d288a3aa45 100644 --- a/vendor/k8s.io/client-go/testing/fixture.go +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -19,19 +19,24 @@ package testing import ( "fmt" "reflect" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/yaml" "sort" "strings" "sync" - jsonpatch "github.com/evanphx/json-patch" + jsonpatch "gopkg.in/evanphx/json-patch.v4" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/meta/testrestmapper" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/managedfields" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" @@ -46,26 +51,32 @@ type ObjectTracker interface { Add(obj runtime.Object) error // Get retrieves the object by its kind, namespace and name. - Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) + Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error) // Create adds an object to the tracker in the specified namespace. 
- Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error // Update updates an existing object in the tracker in the specified namespace. - Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error + + // Patch patches an existing object in the tracker in the specified namespace. + Patch(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.PatchOptions) error + + // Apply applies an object in the tracker in the specified namespace. + Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error // List retrieves all objects of a given kind in the given // namespace. Only non-List kinds are accepted. - List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) + List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error) // Delete deletes an existing object from the tracker. If object // didn't exist in the tracker prior to deletion, Delete returns // no error. - Delete(gvr schema.GroupVersionResource, ns, name string) error + Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error // Watch watches objects from the tracker. Watch returns a channel // which will push added / modified / deleted object. - Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) + Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error) } // ObjectScheme abstracts the implementation of common operations on objects. @@ -76,133 +87,200 @@ type ObjectScheme interface { // ObjectReaction returns a ReactionFunc that applies core.Action to // the given tracker. +// +// If tracker also implements ManagedFieldObjectTracker, then managed fields +// will be handled by the tracker and apply patch actions will be evaluated +// using the field manager and will take field ownership into consideration. +// Without a ManagedFieldObjectTracker, apply patch actions do not consider +// field ownership. +// +// WARNING: There is no server side defaulting, validation, or conversion handled +// by the fake client and subresources are not handled accurately (fields in the +// root resource are not automatically updated when a scale resource is updated, for example). func ObjectReaction(tracker ObjectTracker) ReactionFunc { + reactor := objectTrackerReact{tracker: tracker} return func(action Action) (bool, runtime.Object, error) { - ns := action.GetNamespace() - gvr := action.GetResource() // Here and below we need to switch on implementation types, // not on interfaces, as some interfaces are identical // (e.g. UpdateAction and CreateAction), so if we use them, // updates and creates end up matching the same case branch. 
switch action := action.(type) { - case ListActionImpl: - obj, err := tracker.List(gvr, action.GetKind(), ns) + obj, err := reactor.List(action) return true, obj, err - case GetActionImpl: - obj, err := tracker.Get(gvr, ns, action.GetName()) + obj, err := reactor.Get(action) return true, obj, err - case CreateActionImpl: - objMeta, err := meta.Accessor(action.GetObject()) - if err != nil { - return true, nil, err - } - if action.GetSubresource() == "" { - err = tracker.Create(gvr, action.GetObject(), ns) - } else { - oldObj, getOldObjErr := tracker.Get(gvr, ns, objMeta.GetName()) - if getOldObjErr != nil { - return true, nil, getOldObjErr - } - // Check whether the existing historical object type is the same as the current operation object type that needs to be updated, and if it is the same, perform the update operation. - if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) { - // TODO: Currently we're handling subresource creation as an update - // on the enclosing resource. This works for some subresources but - // might not be generic enough. - err = tracker.Update(gvr, action.GetObject(), ns) - } else { - // If the historical object type is different from the current object type, need to make sure we return the object submitted,don't persist the submitted object in the tracker. - return true, action.GetObject(), nil - } - } - if err != nil { - return true, nil, err - } - obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + obj, err := reactor.Create(action) return true, obj, err - case UpdateActionImpl: - objMeta, err := meta.Accessor(action.GetObject()) - if err != nil { - return true, nil, err - } - err = tracker.Update(gvr, action.GetObject(), ns) - if err != nil { - return true, nil, err - } - obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + obj, err := reactor.Update(action) return true, obj, err - case DeleteActionImpl: - err := tracker.Delete(gvr, ns, action.GetName()) - if err != nil { - return true, nil, err - } - return true, nil, nil - + obj, err := reactor.Delete(action) + return true, obj, err case PatchActionImpl: - obj, err := tracker.Get(gvr, ns, action.GetName()) - if err != nil { - return true, nil, err + if action.GetPatchType() == types.ApplyPatchType { + obj, err := reactor.Apply(action) + return true, obj, err } + obj, err := reactor.Patch(action) + return true, obj, err + default: + return false, nil, fmt.Errorf("no reaction implemented for %s", action) + } + } +} - old, err := json.Marshal(obj) - if err != nil { - return true, nil, err - } +type objectTrackerReact struct { + tracker ObjectTracker +} - // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields - // in obj that are removed by patch are cleared - value := reflect.ValueOf(obj) - value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) - - switch action.GetPatchType() { - case types.JSONPatchType: - patch, err := jsonpatch.DecodePatch(action.GetPatch()) - if err != nil { - return true, nil, err - } - modified, err := patch.Apply(old) - if err != nil { - return true, nil, err - } - - if err = json.Unmarshal(modified, obj); err != nil { - return true, nil, err - } - case types.MergePatchType: - modified, err := jsonpatch.MergePatch(old, action.GetPatch()) - if err != nil { - return true, nil, err - } - - if err := json.Unmarshal(modified, obj); err != nil { - return true, nil, err - } - case types.StrategicMergePatchType, types.ApplyPatchType: - mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) - if err != nil { - 
return true, nil, err - } - if err = json.Unmarshal(mergedByte, obj); err != nil { - return true, nil, err - } - default: - return true, nil, fmt.Errorf("PatchType is not supported") - } +func (o objectTrackerReact) List(action ListActionImpl) (runtime.Object, error) { + return o.tracker.List(action.GetResource(), action.GetKind(), action.GetNamespace(), action.ListOptions) +} - if err = tracker.Update(gvr, obj, ns); err != nil { - return true, nil, err - } +func (o objectTrackerReact) Get(action GetActionImpl) (runtime.Object, error) { + return o.tracker.Get(action.GetResource(), action.GetNamespace(), action.GetName(), action.GetOptions) +} + +func (o objectTrackerReact) Create(action CreateActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return nil, err + } + if action.GetSubresource() == "" { + err = o.tracker.Create(gvr, action.GetObject(), ns, action.CreateOptions) + if err != nil { + return nil, err + } + } else { + oldObj, getOldObjErr := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if getOldObjErr != nil { + return nil, getOldObjErr + } + // Check whether the existing historical object type is the same as the current operation object type that needs to be updated, and if it is the same, perform the update operation. + if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) { + // TODO: Currently we're handling subresource creation as an update + // on the enclosing resource. This works for some subresources but + // might not be generic enough. + err = o.tracker.Update(gvr, action.GetObject(), ns, metav1.UpdateOptions{ + DryRun: action.CreateOptions.DryRun, + FieldManager: action.CreateOptions.FieldManager, + FieldValidation: action.CreateOptions.FieldValidation, + }) + } else { + // If the historical object type is different from the current object type, need to make sure we return the object submitted,don't persist the submitted object in the tracker. 
+ return action.GetObject(), nil + } + } + if err != nil { + return nil, err + } + obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Update(action UpdateActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return nil, err + } - return true, obj, nil + err = o.tracker.Update(gvr, action.GetObject(), ns, action.UpdateOptions) + if err != nil { + return nil, err + } - default: - return false, nil, fmt.Errorf("no reaction implemented for %s", action) + obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Delete(action DeleteActionImpl) (runtime.Object, error) { + err := o.tracker.Delete(action.GetResource(), action.GetNamespace(), action.GetName(), action.DeleteOptions) + return nil, err +} + +func (o objectTrackerReact) Apply(action PatchActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := yaml.Unmarshal(action.GetPatch(), &patchObj.Object); err != nil { + return nil, err + } + err := o.tracker.Apply(gvr, patchObj, ns, action.PatchOptions) + if err != nil { + return nil, err + } + obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Patch(action PatchActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{}) + if err != nil { + return nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return nil, err + } + + if err = json.Unmarshal(modified, obj); err != nil { + return nil, err } + case types.MergePatchType: + modified, err := jsonpatch.MergePatch(old, action.GetPatch()) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.StrategicMergePatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("PatchType %s is not supported", action.GetPatchType()) } + + if err = o.tracker.Patch(gvr, obj, ns, action.PatchOptions); err != nil { + return nil, err + } + + return obj, nil } type tracker struct { @@ -231,7 +309,11 @@ func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracke } } -func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { +func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } // 
Heuristic for list kind: original kind + List suffix. Might // not always be true but this tracker has a pretty limited // understanding of the actual API model. @@ -270,7 +352,12 @@ func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionK return list.DeepCopyObject(), nil } -func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } + t.lock.Lock() defer t.lock.Unlock() @@ -283,8 +370,12 @@ func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Inter return fakewatcher, nil } -func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { - errNotFound := errors.NewNotFound(gvr.GroupResource(), name) +func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } + errNotFound := apierrors.NewNotFound(gvr.GroupResource(), name) t.lock.RLock() defer t.lock.RUnlock() @@ -305,7 +396,7 @@ func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime obj := matchingObj.DeepCopyObject() if status, ok := obj.(*metav1.Status); ok { if status.Status != metav1.StatusSuccess { - return nil, &errors.StatusError{ErrStatus: *status} + return nil, &apierrors.StatusError{ErrStatus: *status} } } @@ -352,11 +443,70 @@ func (t *tracker) Add(obj runtime.Object) error { return nil } -func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { +func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } return t.add(gvr, obj, ns, false) } -func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { +func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } + return t.add(gvr, obj, ns, true) +} + +func (t *tracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, opts ...metav1.PatchOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } + return t.add(gvr, patchedObject, ns, true) +} + +func (t *tracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } + applyConfigurationMeta, err := meta.Accessor(applyConfiguration) + if err != nil { + return err + } + + obj, err := t.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + + old, err := json.Marshal(obj) + if err != nil { + return err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + // For backward compatibility with behavior 1.30 and earlier, continue to handle apply + // via strategic merge patch (clients may use fake.NewClientset and 
ManagedFieldObjectTracker + // for full field manager support). + patch, err := json.Marshal(applyConfiguration) + if err != nil { + return err + } + mergedByte, err := strategicpatch.StrategicMergePatch(old, patch, obj) + if err != nil { + return err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return err + } + return t.add(gvr, obj, ns, true) } @@ -398,7 +548,7 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st if ns != newMeta.GetNamespace() { msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) - return errors.NewBadRequest(msg) + return apierrors.NewBadRequest(msg) } _, ok := t.objects[gvr] @@ -416,12 +566,12 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st t.objects[gvr][namespacedName] = obj return nil } - return errors.NewAlreadyExists(gr, newMeta.GetName()) + return apierrors.NewAlreadyExists(gr, newMeta.GetName()) } if replaceExisting { // Tried to update but no matching object was found. - return errors.NewNotFound(gr, newMeta.GetName()) + return apierrors.NewNotFound(gr, newMeta.GetName()) } t.objects[gvr][namespacedName] = obj @@ -451,19 +601,23 @@ func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { return nil } -func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { +func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } t.lock.Lock() defer t.lock.Unlock() objs, ok := t.objects[gvr] if !ok { - return errors.NewNotFound(gvr.GroupResource(), name) + return apierrors.NewNotFound(gvr.GroupResource(), name) } namespacedName := types.NamespacedName{Namespace: ns, Name: name} obj, ok := objs[namespacedName] if !ok { - return errors.NewNotFound(gvr.GroupResource(), name) + return apierrors.NewNotFound(gvr.GroupResource(), name) } delete(objs, namespacedName) @@ -473,6 +627,203 @@ func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error return nil } +type managedFieldObjectTracker struct { + ObjectTracker + scheme ObjectScheme + objectConverter runtime.ObjectConvertor + mapper meta.RESTMapper + typeConverter managedfields.TypeConverter +} + +var _ ObjectTracker = &managedFieldObjectTracker{} + +// NewFieldManagedObjectTracker returns an ObjectTracker that can be used to keep track +// of objects and managed fields for the fake clientset. Mostly useful for unit tests. 
+func NewFieldManagedObjectTracker(scheme *runtime.Scheme, decoder runtime.Decoder, typeConverter managedfields.TypeConverter) ObjectTracker { + return &managedFieldObjectTracker{ + ObjectTracker: NewObjectTracker(scheme, decoder), + scheme: scheme, + objectConverter: scheme, + mapper: testrestmapper.TestOnlyStaticRESTMapper(scheme), + typeConverter: typeConverter, + } +} + +func (t *managedFieldObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.CreateOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objType, err := meta.TypeAccessor(obj) + if err != nil { + return err + } + // Stamp GVK + apiVersion, kind := gvk.ToAPIVersionAndKind() + objType.SetAPIVersion(apiVersion) + objType.SetKind(kind) + + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + liveObject, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + liveObject, err = t.scheme.New(gvk) + if err != nil { + return err + } + liveObject.GetObjectKind().SetGroupVersionKind(gvk) + } else if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(liveObject, obj, opts.FieldManager) + if err != nil { + return err + } + return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, opts) +} + +func (t *managedFieldObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.UpdateOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(oldObj, obj, opts.FieldManager) + if err != nil { + return err + } + + return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, opts) +} + +func (t *managedFieldObjectTracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, vopts ...metav1.PatchOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objMeta, err := meta.Accessor(patchedObject) + if err != nil { + return err + } + oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(oldObj, patchedObject, opts.FieldManager) + if err != nil { + return err + } + return t.ObjectTracker.Patch(gvr, objWithManagedFields, ns, vopts...) 
+} + +func (t *managedFieldObjectTracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, vopts ...metav1.PatchOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + applyConfigurationMeta, err := meta.Accessor(applyConfiguration) + if err != nil { + return err + } + + exists := true + liveObject, err := t.ObjectTracker.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + exists = false + liveObject, err = t.scheme.New(gvk) + if err != nil { + return err + } + liveObject.GetObjectKind().SetGroupVersionKind(gvk) + } else if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + force := false + if opts.Force != nil { + force = *opts.Force + } + objWithManagedFields, err := mgr.Apply(liveObject, applyConfiguration, opts.FieldManager, force) + if err != nil { + return err + } + + if !exists { + return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, metav1.CreateOptions{ + DryRun: opts.DryRun, + FieldManager: opts.FieldManager, + FieldValidation: opts.FieldValidation, + }) + } else { + return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, metav1.UpdateOptions{ + DryRun: opts.DryRun, + FieldManager: opts.FieldManager, + FieldValidation: opts.FieldValidation, + }) + } +} + +func (t *managedFieldObjectTracker) fieldManagerFor(gvk schema.GroupVersionKind) (*managedfields.FieldManager, error) { + return managedfields.NewDefaultFieldManager( + t.typeConverter, + t.objectConverter, + &objectDefaulter{}, + t.scheme, + gvk, + gvk.GroupVersion(), + "", + nil) +} + +// objectDefaulter implements runtime.Defaulter, but it actually +// does nothing. +type objectDefaulter struct{} + +func (d *objectDefaulter) Default(_ runtime.Object) {} + // filterByNamespace returns all objects in the collection that // match provided namespace. Empty namespace matches // non-namespaced objects. @@ -579,3 +930,76 @@ func resourceCovers(resource string, action Action) bool { return false } + +// assertOptionalSingleArgument returns an error if there is more than one variadic argument. +// Otherwise, it returns the first variadic argument, or zero value if there are no arguments. +func assertOptionalSingleArgument[T any](arguments []T) (T, error) { + var a T + switch len(arguments) { + case 0: + return a, nil + case 1: + return arguments[0], nil + default: + return a, fmt.Errorf("expected only one option argument but got %d", len(arguments)) + } +} + +type TypeResolver interface { + Type(openAPIName string) typed.ParseableType +} + +type TypeConverter struct { + Scheme *runtime.Scheme + TypeResolver TypeResolver +} + +func (tc TypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + name, err := tc.openAPIName(gvk) + if err != nil { + return nil, err + } + t := tc.TypeResolver.Type(name) + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent(), opts...) + default: + return t.FromStructured(obj, opts...) 
+ } +} + +func (tc TypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + vu := value.AsValue().Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} + +func (tc TypeConverter) openAPIName(kind schema.GroupVersionKind) (string, error) { + example, err := tc.Scheme.New(kind) + if err != nil { + return "", err + } + rtype := reflect.TypeOf(example).Elem() + name := friendlyName(rtype.PkgPath() + "." + rtype.Name()) + return name, nil +} + +// This is a copy of openapi.friendlyName. +// TODO: consider introducing a shared version of this function in apimachinery. +func friendlyName(name string) string { + nameParts := strings.Split(name, "/") + // Reverse first part. e.g., io.k8s... instead of k8s.io... + if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { + parts := strings.Split(nameParts[0], ".") + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + nameParts[0] = strings.Join(parts, ".") + } + return strings.Join(nameParts, ".") +} diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index ee19a5af95..e523a66522 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -59,6 +59,12 @@ type Config struct { // FullResyncPeriod is the period at which ShouldResync is considered. FullResyncPeriod time.Duration + // MinWatchTimeout, if set, will define the minimum timeout for watch requests send + // to kube-apiserver. However, values lower than 5m will not be honored to avoid + // negative performance impact on controlplane. + // Optional - if unset a default value of 5m will be used. + MinWatchTimeout time.Duration + // ShouldResync is periodically used by the reflector to determine // whether to Resync the Queue. If ShouldResync is `nil` or // returns true, it means the reflector should proceed with the @@ -138,6 +144,7 @@ func (c *controller) Run(stopCh <-chan struct{}) { c.config.Queue, ReflectorOptions{ ResyncPeriod: c.config.FullResyncPeriod, + MinWatchTimeout: c.config.MinWatchTimeout, TypeDescription: c.config.ObjectDescription, Clock: c.clock, }, @@ -346,6 +353,58 @@ func DeletionHandlingObjectToName(obj interface{}) (ObjectName, error) { return ObjectToName(obj) } +// InformerOptions configure a Reflector. +type InformerOptions struct { + // ListerWatcher implements List and Watch functions for the source of the resource + // the informer will be informing about. + ListerWatcher ListerWatcher + + // ObjectType is an object of the type that informer is expected to receive. + ObjectType runtime.Object + + // Handler defines functions that should called on object mutations. + Handler ResourceEventHandler + + // ResyncPeriod is the underlying Reflector's resync period. If non-zero, the store + // is re-synced with that frequency - Modify events are delivered even if objects + // didn't change. + // This is useful for synchronizing objects that configure external resources + // (e.g. configure cloud provider functionalities). + // Optional - if unset, store resyncing is not happening periodically. + ResyncPeriod time.Duration + + // MinWatchTimeout, if set, will define the minimum timeout for watch requests send + // to kube-apiserver. 
However, values lower than 5m will not be honored to avoid + // negative performance impact on controlplane. + // Optional - if unset a default value of 5m will be used. + MinWatchTimeout time.Duration + + // Indexers, if set, are the indexers for the received objects to optimize + // certain queries. + // Optional - if unset no indexes are maintained. + Indexers Indexers + + // Transform function, if set, will be called on all objects before they will be + // put into the Store and corresponding Add/Modify/Delete handlers will be invoked + // for them. + // Optional - if unset no additional transforming is happening. + Transform TransformFunc +} + +// NewInformerWithOptions returns a Store and a controller for populating the store +// while also providing event notifications. You should only used the returned +// Store for Get/List operations; Add/Modify/Deletes will cause the event +// notifications to be faulty. +func NewInformerWithOptions(options InformerOptions) (Store, Controller) { + var clientState Store + if options.Indexers == nil { + clientState = NewStore(DeletionHandlingMetaNamespaceKeyFunc) + } else { + clientState = NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers) + } + return clientState, newInformer(clientState, options) +} + // NewInformer returns a Store and a controller for populating the store // while also providing event notifications. You should only used the returned // Store for Get/List operations; Add/Modify/Deletes will cause the event @@ -360,6 +419,8 @@ func DeletionHandlingObjectToName(obj interface{}) (ObjectName, error) { // long as possible (until the upstream source closes the watch or times out, // or you stop the controller). // - h is the object you want notifications sent to. +// +// Deprecated: Use NewInformerWithOptions instead. func NewInformer( lw ListerWatcher, objType runtime.Object, @@ -369,7 +430,13 @@ func NewInformer( // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + } + return clientState, newInformer(clientState, options) } // NewIndexerInformer returns an Indexer and a Controller for populating the index @@ -387,6 +454,8 @@ func NewInformer( // or you stop the controller). // - h is the object you want notifications sent to. // - indexers is the indexer for the received object type. +// +// Deprecated: Use NewInformerWithOptions instead. func NewIndexerInformer( lw ListerWatcher, objType runtime.Object, @@ -397,7 +466,14 @@ func NewIndexerInformer( // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Indexers: indexers, + } + return clientState, newInformer(clientState, options) } // NewTransformingInformer returns a Store and a controller for populating @@ -407,6 +483,8 @@ func NewIndexerInformer( // The given transform function will be called on all objects before they will // put into the Store and corresponding Add/Modify/Delete handlers will // be invoked for them. +// +// Deprecated: Use NewInformerWithOptions instead. 
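// Editor's note (illustrative sketch, not part of the upstream diff): the new
// NewInformerWithOptions bundles the former positional arguments into a single
// InformerOptions struct, so optional knobs such as MinWatchTimeout, Indexers,
// and Transform can be combined without further constructor variants. The
// lister-watcher, handler, and transform below are assumptions for the example:
//
//	store, ctrl := cache.NewInformerWithOptions(cache.InformerOptions{
//		ListerWatcher:   podListWatcher,     // assumed cache.ListerWatcher for Pods
//		ObjectType:      &v1.Pod{},
//		Handler:         handler,            // assumed cache.ResourceEventHandler
//		ResyncPeriod:    30 * time.Minute,
//		MinWatchTimeout: 10 * time.Minute,   // values below the 5m default are not honored
//		Transform:       stripManagedFields, // assumed cache.TransformFunc
//	})
//	go ctrl.Run(stopCh)
//
// The deprecated NewInformer, NewIndexerInformer, and NewTransformingInformer
// wrappers shown in the surrounding hunks now build an InformerOptions and
// delegate to the same newInformer path.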
func NewTransformingInformer( lw ListerWatcher, objType runtime.Object, @@ -417,7 +495,14 @@ func NewTransformingInformer( // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Transform: transformer, + } + return clientState, newInformer(clientState, options) } // NewTransformingIndexerInformer returns an Indexer and a controller for @@ -427,6 +512,8 @@ func NewTransformingInformer( // The given transform function will be called on all objects before they will // be put into the Index and corresponding Add/Modify/Delete handlers will // be invoked for them. +// +// Deprecated: Use NewInformerWithOptions instead. func NewTransformingIndexerInformer( lw ListerWatcher, objType runtime.Object, @@ -438,7 +525,15 @@ func NewTransformingIndexerInformer( // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Indexers: indexers, + Transform: transformer, + } + return clientState, newInformer(clientState, options) } // Multiplexes updates in the form of a list of Deltas into a Store, and informs @@ -481,42 +576,29 @@ func processDeltas( // providing event notifications. // // Parameters -// - lw is list and watch functions for the source of the resource you want to -// be informed of. -// - objType is an object of the type that you expect to receive. -// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// - h is the object you want notifications sent to. // - clientState is the store you want to populate -func newInformer( - lw ListerWatcher, - objType runtime.Object, - resyncPeriod time.Duration, - h ResourceEventHandler, - clientState Store, - transformer TransformFunc, -) Controller { +// - options contain the options to configure the controller +func newInformer(clientState Store, options InformerOptions) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. 
fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, - Transformer: transformer, + Transformer: options.Transform, }) cfg := &Config{ Queue: fifo, - ListerWatcher: lw, - ObjectType: objType, - FullResyncPeriod: resyncPeriod, + ListerWatcher: options.ListerWatcher, + ObjectType: options.ObjectType, + FullResyncPeriod: options.ResyncPeriod, + MinWatchTimeout: options.MinWatchTimeout, RetryOnError: false, Process: func(obj interface{}, isInInitialList bool) error { if deltas, ok := obj.(Deltas); ok { - return processDeltas(h, clientState, deltas, isInInitialList) + return processDeltas(options.Handler, clientState, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") }, diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 7160bb1ee7..ce74dfb6f1 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -139,20 +139,17 @@ type DeltaFIFO struct { } // TransformFunc allows for transforming an object before it will be processed. -// TransformFunc (similarly to ResourceEventHandler functions) should be able -// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown. -// -// New in v1.27: In such cases, the contained object will already have gone -// through the transform object separately (when it was added / updated prior -// to the delete), so the TransformFunc can likely safely ignore such objects -// (i.e., just return the input object). // // The most common usage pattern is to clean-up some parts of the object to // reduce component memory usage if a given component doesn't care about them. // -// New in v1.27: unless the object is a DeletedFinalStateUnknown, TransformFunc -// sees the object before any other actor, and it is now safe to mutate the -// object in place instead of making a copy. +// New in v1.27: TransformFunc sees the object before any other actor, and it +// is now safe to mutate the object in place instead of making a copy. +// +// It's recommended for the TransformFunc to be idempotent. +// It MUST be idempotent if objects already present in the cache are passed to +// the Replace() to avoid re-mutating them. Default informers do not pass +// existing objects to Replace though. // // Note that TransformFunc is called while inserting objects into the // notification queue and is therefore extremely performance sensitive; please @@ -440,22 +437,38 @@ func isDeletionDup(a, b *Delta) *Delta { // queueActionLocked appends to the delta list for the object. // Caller must lock first. func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { + return f.queueActionInternalLocked(actionType, actionType, obj) +} + +// queueActionInternalLocked appends to the delta list for the object. +// The actionType is emitted and must honor emitDeltaTypeReplaced. +// The internalActionType is only used within this function and must +// ignore emitDeltaTypeReplaced. +// Caller must lock first. +func (f *DeltaFIFO) queueActionInternalLocked(actionType, internalActionType DeltaType, obj interface{}) error { id, err := f.KeyOf(obj) if err != nil { return KeyError{obj, err} } // Every object comes through this code path once, so this is a good - // place to call the transform func. 
If obj is a - // DeletedFinalStateUnknown tombstone, then the containted inner object - // will already have gone through the transformer, but we document that - // this can happen. In cases involving Replace(), such an object can - // come through multiple times. + // place to call the transform func. + // + // If obj is a DeletedFinalStateUnknown tombstone or the action is a Sync, + // then the object have already gone through the transformer. + // + // If the objects already present in the cache are passed to Replace(), + // the transformer must be idempotent to avoid re-mutating them, + // or coordinate with all readers from the cache to avoid data races. + // Default informers do not pass existing objects to Replace. if f.transformer != nil { - var err error - obj, err = f.transformer(obj) - if err != nil { - return err + _, isTombstone := obj.(DeletedFinalStateUnknown) + if !isTombstone && internalActionType != Sync { + var err error + obj, err = f.transformer(obj) + if err != nil { + return err + } } } @@ -638,7 +651,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { return KeyError{item, err} } keys.Insert(key) - if err := f.queueActionLocked(action, item); err != nil { + if err := f.queueActionInternalLocked(action, Replaced, item); err != nil { return fmt.Errorf("couldn't enqueue object: %v", err) } } diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go index 420ca7b2ac..a60f44943e 100644 --- a/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -30,7 +30,7 @@ import ( // AppendFunc is used to add a matching item to whatever list the caller is using type AppendFunc func(interface{}) -// ListAll calls appendFn with each value retrieved from store which matches the selector. +// ListAll lists items in the store matching the given selector, calling appendFn on each one. func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { selectAll := selector.Empty() for _, m := range store.List() { @@ -51,7 +51,9 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { return nil } -// ListAllByNamespace used to list items belongs to namespace from Indexer. +// ListAllByNamespace lists items in the given namespace in the store matching the given selector, +// calling appendFn on each one. +// If a blank namespace (NamespaceAll) is specified, this delegates to ListAll(). func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { if namespace == metav1.NamespaceAll { return ListAll(indexer, selector, appendFn) diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go index 10b7e6512e..f5708ffeb1 100644 --- a/vendor/k8s.io/client-go/tools/cache/listwatch.go +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -36,6 +36,10 @@ type Lister interface { // Watcher is any object that knows how to start a watch on a resource. type Watcher interface { // Watch should begin a watch at the specified version. + // + // If Watch returns an error, it should handle its own cleanup, including + // but not limited to calling Stop() on the watch, if one was constructed. + // This allows the caller to ignore the watch, if the error is non-nil. 
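// Editor's note (illustrative sketch, not part of the upstream diff): a Watcher
// implementation honoring the cleanup contract documented above might look like
// the following; the wrapper type, client, and namespace are assumptions:
//
//	func (w *podWatcher) Watch(opts metav1.ListOptions) (watch.Interface, error) {
//		inner, err := w.client.CoreV1().Pods(w.namespace).Watch(context.TODO(), opts)
//		if err != nil {
//			if inner != nil {
//				inner.Stop() // clean up before surfacing the error, per the contract above
//			}
//			return nil, err
//		}
//		return inner, nil
//	}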
Watch(options metav1.ListOptions) (watch.Interface, error) } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index f733e244cc..5e7dd57409 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "math/rand" - "os" "reflect" "strings" "sync" @@ -39,6 +38,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + clientfeatures "k8s.io/client-go/features" "k8s.io/client-go/tools/pager" "k8s.io/klog/v2" "k8s.io/utils/clock" @@ -49,6 +49,12 @@ import ( const defaultExpectedTypeName = "" +var ( + // We try to spread the load on apiserver by setting timeouts for + // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. + defaultMinWatchTimeout = 5 * time.Minute +) + // Reflector watches a specified resource and causes all changes to be reflected in the given store. type Reflector struct { // name identifies this reflector. By default it will be a file:line if possible. @@ -72,6 +78,8 @@ type Reflector struct { // backoff manages backoff of ListWatch backoffManager wait.BackoffManager resyncPeriod time.Duration + // minWatchTimeout defines the minimum timeout for watch requests. + minWatchTimeout time.Duration // clock allows tests to manipulate time clock clock.Clock // paginatedResult defines whether pagination should be forced for list calls. @@ -151,12 +159,6 @@ func DefaultWatchErrorHandler(r *Reflector, err error) { } } -var ( - // We try to spread the load on apiserver by setting timeouts for - // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. - minWatchTimeout = 5 * time.Minute -) - // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector // The indexer is configured to key on namespace func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { @@ -194,6 +196,10 @@ type ReflectorOptions struct { // (do not resync). ResyncPeriod time.Duration + // MinWatchTimeout, if non-zero, defines the minimum timeout for watch requests send to kube-apiserver. + // However, values lower than 5m will not be honored to avoid negative performance impact on controlplane. + MinWatchTimeout time.Duration + // Clock allows tests to control time. If unset defaults to clock.RealClock{} Clock clock.Clock } @@ -213,9 +219,14 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S if reflectorClock == nil { reflectorClock = clock.RealClock{} } + minWatchTimeout := defaultMinWatchTimeout + if options.MinWatchTimeout > defaultMinWatchTimeout { + minWatchTimeout = options.MinWatchTimeout + } r := &Reflector{ name: options.Name, resyncPeriod: options.ResyncPeriod, + minWatchTimeout: minWatchTimeout, typeDescription: options.TypeDescription, listerWatcher: lw, store: store, @@ -243,9 +254,7 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S // don't overwrite UseWatchList if already set // because the higher layers (e.g. 
storage/cacher) disabled it on purpose if r.UseWatchList == nil { - if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 { - r.UseWatchList = ptr.To(true) - } + r.UseWatchList = ptr.To(clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient)) } return r @@ -357,12 +366,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { } klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name) - - resyncerrc := make(chan error, 1) - cancelCh := make(chan struct{}) - defer close(cancelCh) - go r.startResync(stopCh, cancelCh, resyncerrc) - return r.watch(w, stopCh, resyncerrc) + return r.watchWithResync(w, stopCh) } // startResync periodically calls r.store.Resync() method. @@ -393,6 +397,15 @@ func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{} } } +// watchWithResync runs watch with startResync in the background. +func (r *Reflector) watchWithResync(w watch.Interface, stopCh <-chan struct{}) error { + resyncerrc := make(chan error, 1) + cancelCh := make(chan struct{}) + defer close(cancelCh) + go r.startResync(stopCh, cancelCh, resyncerrc) + return r.watch(w, stopCh, resyncerrc) +} + // watch simply starts a watch request with the server. func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error { var err error @@ -415,7 +428,7 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc start := r.clock.Now() if w == nil { - timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options := metav1.ListOptions{ ResourceVersion: r.LastSyncResourceVersion(), // We want to avoid situations of hanging watchers. Stop any watchers that do not @@ -442,13 +455,14 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc } } - err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh) + err = handleWatch(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, + r.clock, resyncerrc, stopCh) // Ensure that watch will not be reused across iterations. w.Stop() w = nil retry.After(err) if err != nil { - if err != errorStopRequested { + if !errors.Is(err, errorStopRequested) { switch { case isExpiredError(err): // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already @@ -642,7 +656,7 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { // TODO(#115478): large "list", slow clients, slow network, p&f // might slow down streaming and eventually fail. // maybe in such a case we should retry with an increased timeout? 
- timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options := metav1.ListOptions{ ResourceVersion: lastKnownRV, AllowWatchBookmarks: true, @@ -659,14 +673,12 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { } return nil, err } - bookmarkReceived := pointer.Bool(false) - err = watchHandler(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, + watchListBookmarkReceived, err := handleListWatch(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, func(rv string) { resourceVersion = rv }, - bookmarkReceived, r.clock, make(chan error), stopCh) if err != nil { w.Stop() // stop and retry with clean state - if err == errorStopRequested { + if errors.Is(err, errorStopRequested) { return nil, nil } if isErrorRetriableWithSideEffectsFn(err) { @@ -674,7 +686,7 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { } return nil, err } - if *bookmarkReceived { + if watchListBookmarkReceived { break } } @@ -686,10 +698,10 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { // we utilize the temporaryStore to ensure independence from the current store implementation. // as of today, the store is implemented as a queue and will be drained by the higher-level // component as soon as it finishes replacing the content. - checkWatchListConsistencyIfRequested(stopCh, r.name, resourceVersion, r.listerWatcher, temporaryStore) + checkWatchListDataConsistencyIfRequested(wait.ContextForChannel(stopCh), r.name, resourceVersion, wrapListFuncWithContext(r.listerWatcher.List), temporaryStore.List) - if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { - return nil, fmt.Errorf("unable to sync watch-list result: %v", err) + if err := r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { + return nil, fmt.Errorf("unable to sync watch-list result: %w", err) } initTrace.Step("SyncWith done") r.setLastSyncResourceVersion(resourceVersion) @@ -706,8 +718,33 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err return r.store.Replace(found, resourceVersion) } -// watchHandler watches w and sets setLastSyncResourceVersion -func watchHandler(start time.Time, +// handleListWatch consumes events from w, updates the Store, and records the +// last seen ResourceVersion, to allow continuing from that ResourceVersion on +// retry. If successful, the watcher will be left open after receiving the +// initial set of objects, to allow watching for future events. +func handleListWatch( + start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + clock clock.Clock, + errCh chan error, + stopCh <-chan struct{}, +) (bool, error) { + exitOnWatchListBookmarkReceived := true + return handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName, + setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh) +} + +// handleListWatch consumes events from w, updates the Store, and records the +// last seen ResourceVersion, to allow continuing from that ResourceVersion on +// retry. The watcher will always be stopped on exit. 
+func handleWatch( + start time.Time, w watch.Interface, store Store, expectedType reflect.Type, @@ -715,31 +752,56 @@ func watchHandler(start time.Time, name string, expectedTypeName string, setLastSyncResourceVersion func(string), - exitOnInitialEventsEndBookmark *bool, clock clock.Clock, - errc chan error, + errCh chan error, stopCh <-chan struct{}, ) error { + exitOnWatchListBookmarkReceived := false + _, err := handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName, + setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh) + return err +} + +// handleAnyWatch consumes events from w, updates the Store, and records the last +// seen ResourceVersion, to allow continuing from that ResourceVersion on retry. +// If exitOnWatchListBookmarkReceived is true, the watch events will be consumed +// until a bookmark event is received with the WatchList annotation present. +// Returns true (watchListBookmarkReceived) if the WatchList bookmark was +// received, even if exitOnWatchListBookmarkReceived is false. +// The watcher will always be stopped, unless exitOnWatchListBookmarkReceived is +// true and watchListBookmarkReceived is true. This allows the same watch stream +// to be re-used by the caller to continue watching for new events. +func handleAnyWatch(start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + exitOnWatchListBookmarkReceived bool, + clock clock.Clock, + errCh chan error, + stopCh <-chan struct{}, +) (bool, error) { + watchListBookmarkReceived := false eventCount := 0 - if exitOnInitialEventsEndBookmark != nil { - // set it to false just in case somebody - // made it positive - *exitOnInitialEventsEndBookmark = false - } + initialEventsEndBookmarkWarningTicker := newInitialEventsEndBookmarkTicker(name, clock, start, exitOnWatchListBookmarkReceived) + defer initialEventsEndBookmarkWarningTicker.Stop() loop: for { select { case <-stopCh: - return errorStopRequested - case err := <-errc: - return err + return watchListBookmarkReceived, errorStopRequested + case err := <-errCh: + return watchListBookmarkReceived, err case event, ok := <-w.ResultChan(): if !ok { break loop } if event.Type == watch.Error { - return apierrors.FromObject(event.Object) + return watchListBookmarkReceived, apierrors.FromObject(event.Object) } if expectedType != nil { if e, a := expectedType, reflect.TypeOf(event.Object); e != a { @@ -780,10 +842,8 @@ loop: } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion - if meta.GetAnnotations()["k8s.io/initial-events-end"] == "true" { - if exitOnInitialEventsEndBookmark != nil { - *exitOnInitialEventsEndBookmark = true - } + if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" { + watchListBookmarkReceived = true } default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) @@ -793,20 +853,23 @@ loop: rvu.UpdateResourceVersion(resourceVersion) } eventCount++ - if exitOnInitialEventsEndBookmark != nil && *exitOnInitialEventsEndBookmark { + if exitOnWatchListBookmarkReceived && watchListBookmarkReceived { watchDuration := clock.Since(start) klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration) - return nil + return watchListBookmarkReceived, 
nil } + initialEventsEndBookmarkWarningTicker.observeLastEventTimeStamp(clock.Now()) + case <-initialEventsEndBookmarkWarningTicker.C(): + initialEventsEndBookmarkWarningTicker.warnIfExpired() } } watchDuration := clock.Since(start) if watchDuration < 1*time.Second && eventCount == 0 { - return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name) + return watchListBookmarkReceived, fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name) } klog.V(4).Infof("%s: Watch close - %v total %v items received", name, expectedTypeName, eventCount) - return nil + return watchListBookmarkReceived, nil } // LastSyncResourceVersion is the resource version observed when last sync with the underlying store @@ -918,3 +981,95 @@ func isWatchErrorRetriable(err error) bool { } return false } + +// wrapListFuncWithContext simply wraps ListFunction into another function that accepts a context and ignores it. +func wrapListFuncWithContext(listFn ListFunc) func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + return func(_ context.Context, options metav1.ListOptions) (runtime.Object, error) { + return listFn(options) + } +} + +// initialEventsEndBookmarkTicker a ticker that produces a warning if the bookmark event +// which marks the end of the watch stream, has not been received within the defined tick interval. +// +// Note: +// The methods exposed by this type are not thread-safe. +type initialEventsEndBookmarkTicker struct { + clock.Ticker + clock clock.Clock + name string + + watchStart time.Time + tickInterval time.Duration + lastEventObserveTime time.Time +} + +// newInitialEventsEndBookmarkTicker returns a noop ticker if exitOnInitialEventsEndBookmarkRequested is false. +// Otherwise, it returns a ticker that exposes a method producing a warning if the bookmark event, +// which marks the end of the watch stream, has not been received within the defined tick interval. +// +// Note that the caller controls whether to call t.C() and t.Stop(). +// +// In practice, the reflector exits the watchHandler as soon as the bookmark event is received and calls the t.C() method. 
+func newInitialEventsEndBookmarkTicker(name string, c clock.Clock, watchStart time.Time, exitOnWatchListBookmarkReceived bool) *initialEventsEndBookmarkTicker { + return newInitialEventsEndBookmarkTickerInternal(name, c, watchStart, 10*time.Second, exitOnWatchListBookmarkReceived) +} + +func newInitialEventsEndBookmarkTickerInternal(name string, c clock.Clock, watchStart time.Time, tickInterval time.Duration, exitOnWatchListBookmarkReceived bool) *initialEventsEndBookmarkTicker { + clockWithTicker, ok := c.(clock.WithTicker) + if !ok || !exitOnWatchListBookmarkReceived { + if exitOnWatchListBookmarkReceived { + klog.Warningf("clock does not support WithTicker interface but exitOnInitialEventsEndBookmark was requested") + } + return &initialEventsEndBookmarkTicker{ + Ticker: &noopTicker{}, + } + } + + return &initialEventsEndBookmarkTicker{ + Ticker: clockWithTicker.NewTicker(tickInterval), + clock: c, + name: name, + watchStart: watchStart, + tickInterval: tickInterval, + } +} + +func (t *initialEventsEndBookmarkTicker) observeLastEventTimeStamp(lastEventObserveTime time.Time) { + t.lastEventObserveTime = lastEventObserveTime +} + +func (t *initialEventsEndBookmarkTicker) warnIfExpired() { + if err := t.produceWarningIfExpired(); err != nil { + klog.Warning(err) + } +} + +// produceWarningIfExpired returns an error that represents a warning when +// the time elapsed since the last received event exceeds the tickInterval. +// +// Note that this method should be called when t.C() yields a value. +func (t *initialEventsEndBookmarkTicker) produceWarningIfExpired() error { + if _, ok := t.Ticker.(*noopTicker); ok { + return nil /*noop ticker*/ + } + if t.lastEventObserveTime.IsZero() { + return fmt.Errorf("%s: awaiting required bookmark event for initial events stream, no events received for %v", t.name, t.clock.Since(t.watchStart)) + } + elapsedTime := t.clock.Now().Sub(t.lastEventObserveTime) + hasBookmarkTimerExpired := elapsedTime >= t.tickInterval + + if !hasBookmarkTimerExpired { + return nil + } + return fmt.Errorf("%s: hasn't received required bookmark event marking the end of initial events stream, received last event %v ago", t.name, elapsedTime) +} + +var _ clock.Ticker = &noopTicker{} + +// TODO(#115478): move to k8s/utils repo +type noopTicker struct{} + +func (t *noopTicker) C() <-chan time.Time { return nil } + +func (t *noopTicker) Stop() {} diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go index aa3027d714..a7e0d9c436 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go @@ -1,5 +1,5 @@ /* -Copyright 2023 The Kubernetes Authors. +Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,102 +18,26 @@ package cache import ( "context" - "os" - "sort" - "strconv" - "time" - "github.com/google/go-cmp/cmp" - - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" + "k8s.io/client-go/util/consistencydetector" ) -var dataConsistencyDetectionEnabled = false - -func init() { - dataConsistencyDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) -} - -// checkWatchListConsistencyIfRequested performs a data consistency check only when +// checkWatchListDataConsistencyIfRequested performs a data consistency check only when // the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. // // The consistency check is meant to be enforced only in the CI, not in production. // The check ensures that data retrieved by the watch-list api call -// is exactly the same as data received by the standard list api call. +// is exactly the same as data received by the standard list api call against etcd. // // Note that this function will panic when data inconsistency is detected. // This is intentional because we want to catch it in the CI. -func checkWatchListConsistencyIfRequested(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { - if !dataConsistencyDetectionEnabled { - return - } - checkWatchListConsistency(stopCh, identity, lastSyncedResourceVersion, listerWatcher, store) -} - -// checkWatchListConsistency exists solely for testing purposes. -// we cannot use checkWatchListConsistencyIfRequested because -// it is guarded by an environmental variable. -// we cannot manipulate the environmental variable because -// it will affect other tests in this package. 
-func checkWatchListConsistency(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { - klog.Warningf("%s: data consistency check for the watch-list feature is enabled, this will result in an additional call to the API server.", identity) - opts := metav1.ListOptions{ - ResourceVersion: lastSyncedResourceVersion, - ResourceVersionMatch: metav1.ResourceVersionMatchExact, - } - var list runtime.Object - err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, true, func(_ context.Context) (done bool, err error) { - list, err = listerWatcher.List(opts) - if err != nil { - // the consistency check will only be enabled in the CI - // and LIST calls in general will be retired by the client-go library - // if we fail simply log and retry - klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) - return false, nil - } - return true, nil - }) - if err != nil { - klog.Errorf("failed to list data from the server, the watch-list consistency check won't be performed, stopCh was closed, err: %v", err) +func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) { + if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() { return } - - rawListItems, err := meta.ExtractListWithAlloc(list) - if err != nil { - panic(err) // this should never happen - } - - listItems := toMetaObjectSliceOrDie(rawListItems) - storeItems := toMetaObjectSliceOrDie(store.List()) - - sort.Sort(byUID(listItems)) - sort.Sort(byUID(storeItems)) - - if !cmp.Equal(listItems, storeItems) { - klog.Infof("%s: data received by the new watch-list api call is different than received by the standard list api call, diff: %v", identity, cmp.Diff(listItems, storeItems)) - msg := "data inconsistency detected for the watch-list feature, panicking!" - panic(msg) - } -} - -type byUID []metav1.Object - -func (a byUID) Len() int { return len(a) } -func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } -func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { - result := make([]metav1.Object, len(s)) - for i, v := range s { - m, err := meta.Accessor(v) - if err != nil { - panic(err) - } - result[i] = m - } - return result + // for informers we pass an empty ListOptions because + // listFn might be wrapped for filtering during informer construction. 
+ consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, metav1.ListOptions{}, retrieveItemsFn) } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go index dd5f918067..261dcacb5b 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "os" - "path" "path/filepath" "reflect" "strings" @@ -115,7 +114,7 @@ func ShortenConfig(config *Config) { // FlattenConfig changes the config object into a self-contained config (useful for making secrets) func FlattenConfig(config *Config) error { for key, authInfo := range config.AuthInfos { - baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "") + baseDir, err := MakeAbs(filepath.Dir(authInfo.LocationOfOrigin), "") if err != nil { return err } @@ -130,7 +129,7 @@ func FlattenConfig(config *Config) error { config.AuthInfos[key] = authInfo } for key, cluster := range config.Clusters { - baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "") + baseDir, err := MakeAbs(filepath.Dir(cluster.LocationOfOrigin), "") if err != nil { return err } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go index 31f8963160..2cd213ccb3 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -19,7 +19,6 @@ package clientcmd import ( "errors" "os" - "path" "path/filepath" "reflect" "sort" @@ -148,7 +147,7 @@ func NewDefaultPathOptions() *PathOptions { EnvVar: RecommendedConfigPathEnvVar, ExplicitFileFlag: RecommendedConfigPathFlag, - GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName), + GlobalFileSubpath: filepath.Join(RecommendedHomeDir, RecommendedFileName), LoadingRules: NewDefaultClientConfigLoadingRules(), } diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index af840c4a25..d9d87d55f2 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -159,6 +159,10 @@ type LeaderElectionConfig struct { // Name is the name of the resource lock for debugging Name string + + // Coordinated will use the Coordinated Leader Election feature + // WARNING: Coordinated leader election is ALPHA. 
+ Coordinated bool } // LeaderCallbacks are callbacks that are triggered during certain @@ -249,7 +253,11 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { desc := le.config.Lock.Describe() klog.Infof("attempting to acquire leader lease %v...", desc) wait.JitterUntil(func() { - succeeded = le.tryAcquireOrRenew(ctx) + if !le.config.Coordinated { + succeeded = le.tryAcquireOrRenew(ctx) + } else { + succeeded = le.tryCoordinatedRenew(ctx) + } le.maybeReportTransition() if !succeeded { klog.V(4).Infof("failed to acquire lease %v", desc) @@ -272,7 +280,11 @@ func (le *LeaderElector) renew(ctx context.Context) { timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline) defer timeoutCancel() err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) { - return le.tryAcquireOrRenew(timeoutCtx), nil + if !le.config.Coordinated { + return le.tryAcquireOrRenew(timeoutCtx), nil + } else { + return le.tryCoordinatedRenew(timeoutCtx), nil + } }, timeoutCtx.Done()) le.maybeReportTransition() @@ -304,7 +316,9 @@ func (le *LeaderElector) release() bool { RenewTime: now, AcquireTime: now, } - if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil { + timeoutCtx, timeoutCancel := context.WithTimeout(context.Background(), le.config.RenewDeadline) + defer timeoutCancel() + if err := le.config.Lock.Update(timeoutCtx, leaderElectionRecord); err != nil { klog.Errorf("Failed to release lock: %v", err) return false } @@ -313,6 +327,81 @@ func (le *LeaderElector) release() bool { return true } +// tryCoordinatedRenew checks if it acquired a lease and tries to renew the +// lease if it has already been acquired. Returns true on success else returns +// false. +func (le *LeaderElector) tryCoordinatedRenew(ctx context.Context) bool { + now := metav1.NewTime(le.clock.Now()) + leaderElectionRecord := rl.LeaderElectionRecord{ + HolderIdentity: le.config.Lock.Identity(), + LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second), + RenewTime: now, + AcquireTime: now, + } + + // 1. obtain the electionRecord + oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx) + if err != nil { + if !errors.IsNotFound(err) { + klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) + return false + } + klog.Infof("lease lock not found: %v", le.config.Lock.Describe()) + return false + } + + // 2. Record obtained, check the Identity & Time + if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) { + le.setObservedRecord(oldLeaderElectionRecord) + + le.observedRawRecord = oldLeaderElectionRawRecord + } + + hasExpired := le.observedTime.Add(time.Second * time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).Before(now.Time) + if hasExpired { + klog.Infof("lock has expired: %v", le.config.Lock.Describe()) + return false + } + + if !le.IsLeader() { + klog.V(6).Infof("lock is held by %v and has not yet expired: %v", oldLeaderElectionRecord.HolderIdentity, le.config.Lock.Describe()) + return false + } + + // 2b. If the lease has been marked as "end of term", don't renew it + if le.IsLeader() && oldLeaderElectionRecord.PreferredHolder != "" { + klog.V(4).Infof("lock is marked as 'end of term': %v", le.config.Lock.Describe()) + // TODO: Instead of letting lease expire, the holder may deleted it directly + // This will not be compatible with all controllers, so it needs to be opt-in behavior. 
+ // We must ensure all code guarded by this lease has successfully completed + // prior to releasing or there may be two processes + // simultaneously acting on the critical path. + // Usually once this returns false, the process is terminated.. + // xref: OnStoppedLeading + return false + } + + // 3. We're going to try to update. The leaderElectionRecord is set to it's default + // here. Let's correct it before updating. + if le.IsLeader() { + leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime + leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + leaderElectionRecord.Strategy = oldLeaderElectionRecord.Strategy + le.metrics.slowpathExercised(le.config.Name) + } else { + leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1 + } + + // update the lock itself + if err = le.config.Lock.Update(ctx, leaderElectionRecord); err != nil { + klog.Errorf("Failed to update lock: %v", err) + return false + } + + le.setObservedRecord(&leaderElectionRecord) + return true +} + // tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired, // else it tries to renew the lease if it has already been acquired. Returns true // on success else returns false. diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go b/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go new file mode 100644 index 0000000000..74cf5bb5c2 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go @@ -0,0 +1,202 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package leaderelection + +import ( + "context" + "reflect" + "time" + + v1 "k8s.io/api/coordination/v1" + v1alpha1 "k8s.io/api/coordination/v1alpha1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + coordinationv1alpha1client "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + "k8s.io/utils/clock" +) + +const requeueInterval = 5 * time.Minute + +type CacheSyncWaiter interface { + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool +} + +type LeaseCandidate struct { + leaseClient coordinationv1alpha1client.LeaseCandidateInterface + leaseCandidateInformer cache.SharedIndexInformer + informerFactory informers.SharedInformerFactory + hasSynced cache.InformerSynced + + // At most there will be one item in this Queue (since we only watch one item) + queue workqueue.TypedRateLimitingInterface[int] + + name string + namespace string + + // controller lease + leaseName string + + clock clock.Clock + + binaryVersion, emulationVersion string + preferredStrategies []v1.CoordinatedLeaseStrategy +} + +// NewCandidate creates new LeaseCandidate controller that creates a +// LeaseCandidate object if it does not exist and watches changes +// to the corresponding object and renews if PingTime is set. +// WARNING: This is an ALPHA feature. Ensure that the CoordinatedLeaderElection +// feature gate is on. +func NewCandidate(clientset kubernetes.Interface, + candidateNamespace string, + candidateName string, + targetLease string, + binaryVersion, emulationVersion string, + preferredStrategies []v1.CoordinatedLeaseStrategy, +) (*LeaseCandidate, CacheSyncWaiter, error) { + fieldSelector := fields.OneTermEqualSelector("metadata.name", candidateName).String() + // A separate informer factory is required because this must start before informerFactories + // are started for leader elected components + informerFactory := informers.NewSharedInformerFactoryWithOptions( + clientset, 5*time.Minute, + informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.FieldSelector = fieldSelector + }), + ) + leaseCandidateInformer := informerFactory.Coordination().V1alpha1().LeaseCandidates().Informer() + + lc := &LeaseCandidate{ + leaseClient: clientset.CoordinationV1alpha1().LeaseCandidates(candidateNamespace), + leaseCandidateInformer: leaseCandidateInformer, + informerFactory: informerFactory, + name: candidateName, + namespace: candidateNamespace, + leaseName: targetLease, + clock: clock.RealClock{}, + binaryVersion: binaryVersion, + emulationVersion: emulationVersion, + preferredStrategies: preferredStrategies, + } + lc.queue = workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.DefaultTypedControllerRateLimiter[int](), workqueue.TypedRateLimitingQueueConfig[int]{Name: "leasecandidate"}) + + h, err := leaseCandidateInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + if leasecandidate, ok := newObj.(*v1alpha1.LeaseCandidate); ok { + if leasecandidate.Spec.PingTime != nil && leasecandidate.Spec.PingTime.After(leasecandidate.Spec.RenewTime.Time) { + lc.enqueueLease() + } + } + }, + }) + if err != nil { + return nil, nil, err + } + lc.hasSynced = h.HasSynced + + return lc, informerFactory, nil +} + +func (c *LeaseCandidate) Run(ctx 
context.Context) { + defer c.queue.ShutDown() + + c.informerFactory.Start(ctx.Done()) + if !cache.WaitForNamedCacheSync("leasecandidateclient", ctx.Done(), c.hasSynced) { + return + } + + c.enqueueLease() + go c.runWorker(ctx) + <-ctx.Done() +} + +func (c *LeaseCandidate) runWorker(ctx context.Context) { + for c.processNextWorkItem(ctx) { + } +} + +func (c *LeaseCandidate) processNextWorkItem(ctx context.Context) bool { + key, shutdown := c.queue.Get() + if shutdown { + return false + } + defer c.queue.Done(key) + + err := c.ensureLease(ctx) + if err == nil { + c.queue.AddAfter(key, requeueInterval) + return true + } + + utilruntime.HandleError(err) + c.queue.AddRateLimited(key) + + return true +} + +func (c *LeaseCandidate) enqueueLease() { + c.queue.Add(0) +} + +// ensureLease creates the lease if it does not exist and renew it if it exists. Returns the lease and +// a bool (true if this call created the lease), or any error that occurs. +func (c *LeaseCandidate) ensureLease(ctx context.Context) error { + lease, err := c.leaseClient.Get(ctx, c.name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Creating lease candidate") + // lease does not exist, create it. + leaseToCreate := c.newLeaseCandidate() + if _, err := c.leaseClient.Create(ctx, leaseToCreate, metav1.CreateOptions{}); err != nil { + return err + } + klog.V(2).Infof("Created lease candidate") + return nil + } else if err != nil { + return err + } + klog.V(2).Infof("lease candidate exists. Renewing.") + clone := lease.DeepCopy() + clone.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()} + _, err = c.leaseClient.Update(ctx, clone, metav1.UpdateOptions{}) + if err != nil { + return err + } + return nil +} + +func (c *LeaseCandidate) newLeaseCandidate() *v1alpha1.LeaseCandidate { + lc := &v1alpha1.LeaseCandidate{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.name, + Namespace: c.namespace, + }, + Spec: v1alpha1.LeaseCandidateSpec{ + LeaseName: c.leaseName, + BinaryVersion: c.binaryVersion, + EmulationVersion: c.emulationVersion, + PreferredStrategies: c.preferredStrategies, + }, + } + lc.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()} + return lc +} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index 483753d632..053a7570d7 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -19,14 +19,15 @@ package resourcelock import ( "context" "fmt" - clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" "time" + v1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" ) const ( @@ -114,11 +115,13 @@ type LeaderElectionRecord struct { // attempt to acquire leases with empty identities and will wait for the full lease // interval to expire before attempting to reacquire. This value is set to empty when // a client voluntarily steps down. 
- HolderIdentity string `json:"holderIdentity"` - LeaseDurationSeconds int `json:"leaseDurationSeconds"` - AcquireTime metav1.Time `json:"acquireTime"` - RenewTime metav1.Time `json:"renewTime"` - LeaderTransitions int `json:"leaderTransitions"` + HolderIdentity string `json:"holderIdentity"` + LeaseDurationSeconds int `json:"leaseDurationSeconds"` + AcquireTime metav1.Time `json:"acquireTime"` + RenewTime metav1.Time `json:"renewTime"` + LeaderTransitions int `json:"leaderTransitions"` + Strategy v1.CoordinatedLeaseStrategy `json:"strategy"` + PreferredHolder string `json:"preferredHolder"` } // EventRecorder records a change in the ResourceLock. diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 8a9d7d60f2..7cd2a8b9ca 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -122,6 +122,12 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec if spec.RenewTime != nil { r.RenewTime = metav1.Time{Time: spec.RenewTime.Time} } + if spec.PreferredHolder != nil { + r.PreferredHolder = *spec.PreferredHolder + } + if spec.Strategy != nil { + r.Strategy = *spec.Strategy + } return &r } @@ -129,11 +135,18 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec { leaseDurationSeconds := int32(ler.LeaseDurationSeconds) leaseTransitions := int32(ler.LeaderTransitions) - return coordinationv1.LeaseSpec{ + spec := coordinationv1.LeaseSpec{ HolderIdentity: &ler.HolderIdentity, LeaseDurationSeconds: &leaseDurationSeconds, AcquireTime: &metav1.MicroTime{Time: ler.AcquireTime.Time}, RenewTime: &metav1.MicroTime{Time: ler.RenewTime.Time}, LeaseTransitions: &leaseTransitions, } + if ler.PreferredHolder != "" { + spec.PreferredHolder = &ler.PreferredHolder + } + if ler.Strategy != "" { + spec.Strategy = &ler.Strategy + } + return spec } diff --git a/vendor/k8s.io/client-go/tools/portforward/fallback_dialer.go b/vendor/k8s.io/client-go/tools/portforward/fallback_dialer.go index 8fb74a4185..7fcc2492bf 100644 --- a/vendor/k8s.io/client-go/tools/portforward/fallback_dialer.go +++ b/vendor/k8s.io/client-go/tools/portforward/fallback_dialer.go @@ -21,21 +21,21 @@ import ( "k8s.io/klog/v2" ) -var _ httpstream.Dialer = &fallbackDialer{} +var _ httpstream.Dialer = &FallbackDialer{} -// fallbackDialer encapsulates a primary and secondary dialer, including +// FallbackDialer encapsulates a primary and secondary dialer, including // the boolean function to determine if the primary dialer failed. Implements // the httpstream.Dialer interface. -type fallbackDialer struct { +type FallbackDialer struct { primary httpstream.Dialer secondary httpstream.Dialer shouldFallback func(error) bool } -// NewFallbackDialer creates the fallbackDialer with the primary and secondary dialers, +// NewFallbackDialer creates the FallbackDialer with the primary and secondary dialers, // as well as the boolean function to determine if the primary dialer failed. 
func NewFallbackDialer(primary, secondary httpstream.Dialer, shouldFallback func(error) bool) httpstream.Dialer { - return &fallbackDialer{ + return &FallbackDialer{ primary: primary, secondary: secondary, shouldFallback: shouldFallback, @@ -47,7 +47,7 @@ func NewFallbackDialer(primary, secondary httpstream.Dialer, shouldFallback func // httstream.Connection and the negotiated protocol version accepted. If the initial // primary dialer fails, this function attempts the secondary dialer. Returns an error // if one occurs. -func (f *fallbackDialer) Dial(protocols ...string) (httpstream.Connection, string, error) { +func (f *FallbackDialer) Dial(protocols ...string) (httpstream.Connection, string, error) { conn, version, err := f.primary.Dial(protocols...) if err != nil && f.shouldFallback(err) { klog.V(4).Infof("fallback to secondary dialer from primary dialer err: %v", err) diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index 0745fb4a35..55947d2094 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -203,8 +203,8 @@ func NewBroadcaster(opts ...BroadcasterOption) EventBroadcaster { // - The context was nil. // - The context was context.Background() to begin with. // - // Both cases get checked here. - haveCtxCancelation := ctx.Done() == nil + // Both cases get checked here: we have cancelation if (and only if) there is a channel. + haveCtxCancelation := ctx.Done() != nil eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(ctx) @@ -395,7 +395,11 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watc func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { watcher, err := e.Watch() if err != nil { + // This function traditionally returns no error even though it can fail. + // Instead, it logs the error and returns an empty watch. The empty + // watch ensures that callers don't crash when calling Stop. 
klog.FromContext(e.cancelationCtx).Error(err, "Unable start event watcher (will not retry!)") + return watch.NewEmptyWatch() } go func() { defer utilruntime.HandleCrash() diff --git a/vendor/k8s.io/client-go/tools/remotecommand/fallback.go b/vendor/k8s.io/client-go/tools/remotecommand/fallback.go index 3efde3c588..78620d33cf 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/fallback.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/fallback.go @@ -18,6 +18,8 @@ package remotecommand import ( "context" + + "k8s.io/klog/v2" ) var _ Executor = &FallbackExecutor{} @@ -51,6 +53,7 @@ func (f *FallbackExecutor) Stream(options StreamOptions) error { func (f *FallbackExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { err := f.primary.StreamWithContext(ctx, options) if f.shouldFallback(err) { + klog.V(4).Infof("RemoteCommand fallback: %v", err) return f.secondary.StreamWithContext(ctx, options) } return err diff --git a/vendor/k8s.io/client-go/tools/watch/retrywatcher.go b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go index d81dc43570..8431d02fcb 100644 --- a/vendor/k8s.io/client-go/tools/watch/retrywatcher.go +++ b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "net/http" + "sync" "time" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -53,6 +54,7 @@ type RetryWatcher struct { stopChan chan struct{} doneChan chan struct{} minRestartDelay time.Duration + stopChanLock sync.Mutex } // NewRetryWatcher creates a new RetryWatcher. @@ -286,7 +288,15 @@ func (rw *RetryWatcher) ResultChan() <-chan watch.Event { // Stop implements Interface. func (rw *RetryWatcher) Stop() { - close(rw.stopChan) + rw.stopChanLock.Lock() + defer rw.stopChanLock.Unlock() + + // Prevent closing an already closed channel to prevent a panic + select { + case <-rw.stopChan: + default: + close(rw.stopChan) + } } // Done allows the caller to be notified when Retry watcher stops. 
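// Reviewer note (illustrative, not part of the vendored diff): the RetryWatcher
// hunk above serializes Stop() behind a mutex and checks the channel before
// closing it, so calling Stop() more than once is now safe instead of panicking
// on a double close. A minimal usage sketch under that assumption; the pod
// list/watch wiring, namespace, and function name below are hypothetical
// example values, not code from this change.
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	toolswatch "k8s.io/client-go/tools/watch"
)

func watchPods(ctx context.Context, cs kubernetes.Interface, ns, resourceVersion string) error {
	rw, err := toolswatch.NewRetryWatcher(resourceVersion, &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return cs.CoreV1().Pods(ns).Watch(ctx, options)
		},
	})
	if err != nil {
		return err
	}
	defer rw.Stop() // still runs even if Stop was already called below

	for {
		select {
		case <-ctx.Done():
			rw.Stop() // a second Stop is idempotent after this change
			return ctx.Err()
		case ev, ok := <-rw.ResultChan():
			if !ok {
				return nil
			}
			fmt.Println("event:", ev.Type)
		}
	}
}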
diff --git a/vendor/k8s.io/client-go/transport/cert_rotation.go b/vendor/k8s.io/client-go/transport/cert_rotation.go index dc22b6ec4c..e76f65812d 100644 --- a/vendor/k8s.io/client-go/transport/cert_rotation.go +++ b/vendor/k8s.io/client-go/transport/cert_rotation.go @@ -47,14 +47,17 @@ type dynamicClientCert struct { connDialer *connrotation.Dialer // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert { d := &dynamicClientCert{ reload: reload, connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicClientCertificate"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicClientCertificate"}, + ), } return d diff --git a/vendor/k8s.io/client-go/transport/websocket/roundtripper.go b/vendor/k8s.io/client-go/transport/websocket/roundtripper.go index 624dd5473a..8286a8eb52 100644 --- a/vendor/k8s.io/client-go/transport/websocket/roundtripper.go +++ b/vendor/k8s.io/client-go/transport/websocket/roundtripper.go @@ -111,6 +111,10 @@ func (rt *RoundTripper) RoundTrip(request *http.Request) (retResp *http.Response wsConn, resp, err := dialer.DialContext(request.Context(), request.URL.String(), request.Header) if err != nil { if errors.Is(err, gwebsocket.ErrBadHandshake) { + // Enhance the error message with the response status if possible. + if resp != nil && len(resp.Status) > 0 { + err = fmt.Errorf("%w (%s)", err, resp.Status) + } return nil, &httpstream.UpgradeFailureError{Cause: err} } return nil, err diff --git a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go new file mode 100644 index 0000000000..b33d08032f --- /dev/null +++ b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go @@ -0,0 +1,146 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package consistencydetector + +import ( + "context" + "fmt" + "sort" + "time" + + "github.com/google/go-cmp/cmp" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +type RetrieveItemsFunc[U any] func() []U + +type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error) + +// CheckDataConsistency exists solely for testing purposes. +// we cannot use checkWatchListDataConsistencyIfRequested because +// it is guarded by an environmental variable. +// we cannot manipulate the environmental variable because +// it will affect other tests in this package. 
+func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) { + if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) { + klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. Skipping the data consistency check.", identity) + return + } + klog.Warningf("data consistency check for %s is enabled, this will result in an additional call to the API server.", identity) + + retrievedItems := toMetaObjectSliceOrDie(retrieveItemsFn()) + listOptions = prepareListCallOptions(lastSyncedResourceVersion, listOptions, len(retrievedItems)) + var list runtime.Object + err := wait.PollUntilContextCancel(ctx, time.Second, true, func(_ context.Context) (done bool, err error) { + list, err = listFn(ctx, listOptions) + if err != nil { + // the consistency check will only be enabled in the CI + // and LIST calls in general will be retired by the client-go library + // if we fail simply log and retry + klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) + return false, nil + } + return true, nil + }) + if err != nil { + klog.Errorf("failed to list data from the server, the data consistency check for %s won't be performed, stopCh was closed, err: %v", identity, err) + return + } + + rawListItems, err := meta.ExtractListWithAlloc(list) + if err != nil { + panic(err) // this should never happen + } + listItems := toMetaObjectSliceOrDie(rawListItems) + + sort.Sort(byUID(listItems)) + sort.Sort(byUID(retrievedItems)) + + if !cmp.Equal(listItems, retrievedItems) { + klog.Infof("previously received data for %s is different than received by the standard list api call against etcd, diff: %v", identity, cmp.Diff(listItems, retrievedItems)) + msg := fmt.Sprintf("data inconsistency detected for %s, panicking!", identity) + panic(msg) + } +} + +// canFormAdditionalListCall ensures that we can form a valid LIST requests +// for checking data consistency. 
+func canFormAdditionalListCall(lastSyncedResourceVersion string, listOptions metav1.ListOptions) bool { + // since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact + // we need to make sure that the continuation hasn't been set + // https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L38 + if len(listOptions.Continue) > 0 { + return false + } + + // since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact + // we need to make sure that the RV is valid because the validation code forbids RV == "0" + // https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L44 + if lastSyncedResourceVersion == "0" { + return false + } + + return true +} + +// prepareListCallOptions changes the input list options so that +// the list call goes directly to etcd +func prepareListCallOptions(lastSyncedResourceVersion string, listOptions metav1.ListOptions, retrievedItemsCount int) metav1.ListOptions { + // this is our legacy case: + // + // the watch cache skips the Limit if the ResourceVersion was set to "0" + // thus, to compare with data retrieved directly from etcd + // we need to skip the limit to for the list call as well. + // + // note that when the number of retrieved items is less than the request limit, + // it means either the watch cache is disabled, or there is not enough data. + // in both cases, we can use the limit because we will be able to compare + // the data with the items retrieved from etcd. + if listOptions.ResourceVersion == "0" && listOptions.Limit > 0 && int64(retrievedItemsCount) > listOptions.Limit { + listOptions.Limit = 0 + } + + // set the RV and RVM so that we get the snapshot of data + // directly from etcd. + listOptions.ResourceVersion = lastSyncedResourceVersion + listOptions.ResourceVersionMatch = metav1.ResourceVersionMatchExact + + return listOptions +} + +type byUID []metav1.Object + +func (a byUID) Len() int { return len(a) } +func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } +func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { + result := make([]metav1.Object, len(s)) + for i, v := range s { + m, err := meta.Accessor(v) + if err != nil { + panic(err) + } + result[i] = m + } + return result +} diff --git a/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go new file mode 100644 index 0000000000..7610c05c28 --- /dev/null +++ b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go @@ -0,0 +1,70 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package consistencydetector + +import ( + "context" + "os" + "strconv" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var dataConsistencyDetectionForListFromCacheEnabled = false + +func init() { + dataConsistencyDetectionForListFromCacheEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR")) +} + +// CheckListFromCacheDataConsistencyIfRequested performs a data consistency check only when +// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup +// for requests that have a high chance of being served from the watch-cache. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by a list api call from the watch-cache +// is exactly the same as data received by the list api call from etcd. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +// +// Note that this function doesn't examine the ListOptions to determine +// if the original request has hit the cache because it would be challenging +// to maintain consistency with the server-side implementation. +// For simplicity, we assume that the first request retrieved data from +// the cache (even though this might not be true for some requests) +// and issue the second call to get data from etcd for comparison. +func CheckListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + if !dataConsistencyDetectionForListFromCacheEnabled { + return + } + checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList) +} + +func checkListFromCacheDataConsistencyIfRequestedInternal[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + receivedListMeta, err := meta.ListAccessor(receivedList) + if err != nil { + panic(err) + } + rawListItems, err := meta.ExtractListWithAlloc(receivedList) + if err != nil { + panic(err) // this should never happen + } + lastSyncedResourceVersion := receivedListMeta.GetResourceVersion() + CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listItemsFn, optionsUsedToReceiveList, func() []runtime.Object { return rawListItems }) +} diff --git a/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go new file mode 100644 index 0000000000..cda5fc205f --- /dev/null +++ b/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go @@ -0,0 +1,54 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package consistencydetector + +import ( + "context" + "os" + "strconv" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var dataConsistencyDetectionForWatchListEnabled = false + +func init() { + dataConsistencyDetectionForWatchListEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) +} + +// IsDataConsistencyDetectionForWatchListEnabled returns true when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +func IsDataConsistencyDetectionForWatchListEnabled() bool { + return dataConsistencyDetectionForWatchListEnabled +} + +// CheckWatchListFromCacheDataConsistencyIfRequested performs a data consistency check only when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by the watch-list api call +// is exactly the same as data received by the standard list api call against etcd. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +func CheckWatchListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + if !IsDataConsistencyDetectionForWatchListEnabled() { + return + } + checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList) +} diff --git a/vendor/k8s.io/client-go/util/watchlist/watch_list.go b/vendor/k8s.io/client-go/util/watchlist/watch_list.go new file mode 100644 index 0000000000..84106458a5 --- /dev/null +++ b/vendor/k8s.io/client-go/util/watchlist/watch_list.go @@ -0,0 +1,82 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watchlist + +import ( + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientfeatures "k8s.io/client-go/features" + "k8s.io/utils/ptr" +) + +var scheme = runtime.NewScheme() + +func init() { + utilruntime.Must(metainternalversion.AddToScheme(scheme)) +} + +// PrepareWatchListOptionsFromListOptions creates a new ListOptions +// that can be used for a watch-list request from the given listOptions. +// +// This function also determines if the given listOptions can be used to form a watch-list request, +// which would result in streaming semantically equivalent data from the server. 
+func PrepareWatchListOptionsFromListOptions(listOptions metav1.ListOptions) (metav1.ListOptions, bool, error) { + if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) { + return metav1.ListOptions{}, false, nil + } + + internalListOptions := &metainternalversion.ListOptions{} + if err := scheme.Convert(&listOptions, internalListOptions, nil); err != nil { + return metav1.ListOptions{}, false, err + } + if errs := metainternalversionvalidation.ValidateListOptions(internalListOptions, true); len(errs) > 0 { + return metav1.ListOptions{}, false, nil + } + + watchListOptions := listOptions + // this is our legacy case, the cache ignores LIMIT for + // ResourceVersion == 0 and RVM=unset|NotOlderThan + if listOptions.Limit > 0 && listOptions.ResourceVersion != "0" { + return metav1.ListOptions{}, false, nil + } + watchListOptions.Limit = 0 + + // to ensure that we can create a watch-list request that returns + // semantically equivalent data for the given listOptions, + // we need to validate that the RVM for the list is supported by watch-list requests. + if listOptions.ResourceVersionMatch == metav1.ResourceVersionMatchExact { + return metav1.ListOptions{}, false, nil + } + watchListOptions.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan + + watchListOptions.Watch = true + watchListOptions.AllowWatchBookmarks = true + watchListOptions.SendInitialEvents = ptr.To(true) + + internalWatchListOptions := &metainternalversion.ListOptions{} + if err := scheme.Convert(&watchListOptions, internalWatchListOptions, nil); err != nil { + return metav1.ListOptions{}, false, err + } + if errs := metainternalversionvalidation.ValidateListOptions(internalWatchListOptions, true); len(errs) > 0 { + return metav1.ListOptions{}, false, nil + } + + return watchListOptions, true, nil +} diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go index efda7c197f..1f9567881c 100644 --- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -24,49 +24,66 @@ import ( "golang.org/x/time/rate" ) -type RateLimiter interface { +// Deprecated: RateLimiter is deprecated, use TypedRateLimiter instead. +type RateLimiter TypedRateLimiter[any] + +type TypedRateLimiter[T comparable] interface { // When gets an item and gets to decide how long that item should wait - When(item interface{}) time.Duration + When(item T) time.Duration // Forget indicates that an item is finished being retried. Doesn't matter whether it's for failing // or for success, we'll stop tracking it - Forget(item interface{}) + Forget(item T) // NumRequeues returns back how many failures the item has had - NumRequeues(item interface{}) int + NumRequeues(item T) int } // DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has // both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential +// +// Deprecated: Use DefaultTypedControllerRateLimiter instead. func DefaultControllerRateLimiter() RateLimiter { - return NewMaxOfRateLimiter( - NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), + return DefaultTypedControllerRateLimiter[any]() +} + +// DefaultTypedControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has +// both overall and per-item rate limiting. 
The overall is a token bucket and the per-item is exponential +func DefaultTypedControllerRateLimiter[T comparable]() TypedRateLimiter[T] { + return NewTypedMaxOfRateLimiter( + NewTypedItemExponentialFailureRateLimiter[T](5*time.Millisecond, 1000*time.Second), // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) - &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + &TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) } -// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API -type BucketRateLimiter struct { +// Deprecated: BucketRateLimiter is deprecated, use TypedBucketRateLimiter instead. +type BucketRateLimiter = TypedBucketRateLimiter[any] + +// TypedBucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API +type TypedBucketRateLimiter[T comparable] struct { *rate.Limiter } var _ RateLimiter = &BucketRateLimiter{} -func (r *BucketRateLimiter) When(item interface{}) time.Duration { +func (r *TypedBucketRateLimiter[T]) When(item T) time.Duration { return r.Limiter.Reserve().Delay() } -func (r *BucketRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedBucketRateLimiter[T]) NumRequeues(item T) int { return 0 } -func (r *BucketRateLimiter) Forget(item interface{}) { +func (r *TypedBucketRateLimiter[T]) Forget(item T) { } -// ItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit +// Deprecated: ItemExponentialFailureRateLimiter is deprecated, use TypedItemExponentialFailureRateLimiter instead. +type ItemExponentialFailureRateLimiter = TypedItemExponentialFailureRateLimiter[any] + +// TypedItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit // dealing with max failures and expiration are up to the caller -type ItemExponentialFailureRateLimiter struct { +type TypedItemExponentialFailureRateLimiter[T comparable] struct { failuresLock sync.Mutex - failures map[interface{}]int + failures map[T]int baseDelay time.Duration maxDelay time.Duration @@ -74,19 +91,29 @@ type ItemExponentialFailureRateLimiter struct { var _ RateLimiter = &ItemExponentialFailureRateLimiter{} +// Deprecated: NewItemExponentialFailureRateLimiter is deprecated, use NewTypedItemExponentialFailureRateLimiter instead. func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { - return &ItemExponentialFailureRateLimiter{ - failures: map[interface{}]int{}, + return NewTypedItemExponentialFailureRateLimiter[any](baseDelay, maxDelay) +} + +func NewTypedItemExponentialFailureRateLimiter[T comparable](baseDelay time.Duration, maxDelay time.Duration) TypedRateLimiter[T] { + return &TypedItemExponentialFailureRateLimiter[T]{ + failures: map[T]int{}, baseDelay: baseDelay, maxDelay: maxDelay, } } +// Deprecated: DefaultItemBasedRateLimiter is deprecated, use DefaultTypedItemBasedRateLimiter instead. 
func DefaultItemBasedRateLimiter() RateLimiter { - return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second) + return DefaultTypedItemBasedRateLimiter[any]() } -func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { +func DefaultTypedItemBasedRateLimiter[T comparable]() TypedRateLimiter[T] { + return NewTypedItemExponentialFailureRateLimiter[T](time.Millisecond, 1000*time.Second) +} + +func (r *TypedItemExponentialFailureRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -107,14 +134,14 @@ func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration return calculated } -func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedItemExponentialFailureRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } -func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { +func (r *TypedItemExponentialFailureRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -122,9 +149,13 @@ func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { } // ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that -type ItemFastSlowRateLimiter struct { +// Deprecated: Use TypedItemFastSlowRateLimiter instead. +type ItemFastSlowRateLimiter = TypedItemFastSlowRateLimiter[any] + +// TypedItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that +type TypedItemFastSlowRateLimiter[T comparable] struct { failuresLock sync.Mutex - failures map[interface{}]int + failures map[T]int maxFastAttempts int fastDelay time.Duration @@ -133,16 +164,21 @@ type ItemFastSlowRateLimiter struct { var _ RateLimiter = &ItemFastSlowRateLimiter{} +// Deprecated: NewItemFastSlowRateLimiter is deprecated, use NewTypedItemFastSlowRateLimiter instead. 
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { - return &ItemFastSlowRateLimiter{ - failures: map[interface{}]int{}, + return NewTypedItemFastSlowRateLimiter[any](fastDelay, slowDelay, maxFastAttempts) +} + +func NewTypedItemFastSlowRateLimiter[T comparable](fastDelay, slowDelay time.Duration, maxFastAttempts int) TypedRateLimiter[T] { + return &TypedItemFastSlowRateLimiter[T]{ + failures: map[T]int{}, fastDelay: fastDelay, slowDelay: slowDelay, maxFastAttempts: maxFastAttempts, } } -func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { +func (r *TypedItemFastSlowRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -155,14 +191,14 @@ func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { return r.slowDelay } -func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedItemFastSlowRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } -func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { +func (r *TypedItemFastSlowRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -172,11 +208,18 @@ func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { // MaxOfRateLimiter calls every RateLimiter and returns the worst case response // When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items // were separately delayed a longer time. -type MaxOfRateLimiter struct { - limiters []RateLimiter +// +// Deprecated: Use TypedMaxOfRateLimiter instead. +type MaxOfRateLimiter = TypedMaxOfRateLimiter[any] + +// TypedMaxOfRateLimiter calls every RateLimiter and returns the worst case response +// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items +// were separately delayed a longer time. +type TypedMaxOfRateLimiter[T comparable] struct { + limiters []TypedRateLimiter[T] } -func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { +func (r *TypedMaxOfRateLimiter[T]) When(item T) time.Duration { ret := time.Duration(0) for _, limiter := range r.limiters { curr := limiter.When(item) @@ -188,11 +231,16 @@ func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { return ret } -func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { - return &MaxOfRateLimiter{limiters: limiters} +// Deprecated: NewMaxOfRateLimiter is deprecated, use NewTypedMaxOfRateLimiter instead. +func NewMaxOfRateLimiter(limiters ...TypedRateLimiter[any]) RateLimiter { + return NewTypedMaxOfRateLimiter(limiters...) } -func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { +func NewTypedMaxOfRateLimiter[T comparable](limiters ...TypedRateLimiter[T]) TypedRateLimiter[T] { + return &TypedMaxOfRateLimiter[T]{limiters: limiters} +} + +func (r *TypedMaxOfRateLimiter[T]) NumRequeues(item T) int { ret := 0 for _, limiter := range r.limiters { curr := limiter.NumRequeues(item) @@ -204,23 +252,32 @@ func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { return ret } -func (r *MaxOfRateLimiter) Forget(item interface{}) { +func (r *TypedMaxOfRateLimiter[T]) Forget(item T) { for _, limiter := range r.limiters { limiter.Forget(item) } } // WithMaxWaitRateLimiter have maxDelay which avoids waiting too long -type WithMaxWaitRateLimiter struct { - limiter RateLimiter +// Deprecated: Use TypedWithMaxWaitRateLimiter instead. 
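Taken together, the typed constructors above compose the same way the old interface{}-based ones did, just with a concrete key type. A minimal sketch with a made-up requestKey type, mirroring the shape of DefaultTypedControllerRateLimiter:

```go
package example

import (
	"time"

	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

// requestKey is a hypothetical comparable key type used only for illustration.
type requestKey struct {
	Namespace string
	Name      string
}

// newRequestRateLimiter composes a per-item exponential backoff with an
// overall token bucket, taking the worst (longest) delay of the two.
func newRequestRateLimiter() workqueue.TypedRateLimiter[requestKey] {
	return workqueue.NewTypedMaxOfRateLimiter(
		// Per-item backoff: 5ms base delay, capped at 1000s.
		workqueue.NewTypedItemExponentialFailureRateLimiter[requestKey](5*time.Millisecond, 1000*time.Second),
		// Overall limit: 10 qps with a burst of 100, shared across all items.
		&workqueue.TypedBucketRateLimiter[requestKey]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
	)
}
```

A rate-limited queue keyed by requestKey would then be built with workqueue.NewTypedRateLimitingQueue(newRequestRateLimiter()).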
+type WithMaxWaitRateLimiter = TypedWithMaxWaitRateLimiter[any] + +// TypedWithMaxWaitRateLimiter have maxDelay which avoids waiting too long +type TypedWithMaxWaitRateLimiter[T comparable] struct { + limiter TypedRateLimiter[T] maxDelay time.Duration } +// Deprecated: NewWithMaxWaitRateLimiter is deprecated, use NewTypedWithMaxWaitRateLimiter instead. func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter { - return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay} + return NewTypedWithMaxWaitRateLimiter[any](limiter, maxDelay) +} + +func NewTypedWithMaxWaitRateLimiter[T comparable](limiter TypedRateLimiter[T], maxDelay time.Duration) TypedRateLimiter[T] { + return &TypedWithMaxWaitRateLimiter[T]{limiter: limiter, maxDelay: maxDelay} } -func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { +func (w TypedWithMaxWaitRateLimiter[T]) When(item T) time.Duration { delay := w.limiter.When(item) if delay > w.maxDelay { return w.maxDelay @@ -229,10 +286,10 @@ func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { return delay } -func (w WithMaxWaitRateLimiter) Forget(item interface{}) { +func (w TypedWithMaxWaitRateLimiter[T]) Forget(item T) { w.limiter.Forget(item) } -func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int { +func (w TypedWithMaxWaitRateLimiter[T]) NumRequeues(item T) int { return w.limiter.NumRequeues(item) } diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index c1df720302..958b96a80c 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -27,14 +27,25 @@ import ( // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to // requeue items after failures without ending up in a hot-loop. -type DelayingInterface interface { - Interface +// +// Deprecated: use TypedDelayingInterface instead. +type DelayingInterface TypedDelayingInterface[any] + +// TypedDelayingInterface is an Interface that can Add an item at a later time. This makes it easier to +// requeue items after failures without ending up in a hot-loop. +type TypedDelayingInterface[T comparable] interface { + TypedInterface[T] // AddAfter adds an item to the workqueue after the indicated duration has passed - AddAfter(item interface{}, duration time.Duration) + AddAfter(item T, duration time.Duration) } // DelayingQueueConfig specifies optional configurations to customize a DelayingInterface. -type DelayingQueueConfig struct { +// +// Deprecated: use TypedDelayingQueueConfig instead. +type DelayingQueueConfig = TypedDelayingQueueConfig[any] + +// TypedDelayingQueueConfig specifies optional configurations to customize a DelayingInterface. +type TypedDelayingQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -46,25 +57,42 @@ type DelayingQueueConfig struct { Clock clock.WithTicker // Queue optionally allows injecting custom queue Interface instead of the default one. - Queue Interface + Queue TypedInterface[T] } // NewDelayingQueue constructs a new workqueue with delayed queuing ability. // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use // NewDelayingQueueWithConfig instead and specify a name. +// +// Deprecated: use TypedNewDelayingQueue instead. 
func NewDelayingQueue() DelayingInterface { return NewDelayingQueueWithConfig(DelayingQueueConfig{}) } +// TypedNewDelayingQueue constructs a new workqueue with delayed queuing ability. +// TypedNewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use +// TypedNewDelayingQueueWithConfig instead and specify a name. +func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] { + return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{}) +} + // NewDelayingQueueWithConfig constructs a new workqueue with options to // customize different properties. +// +// Deprecated: use TypedNewDelayingQueueWithConfig instead. func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { + return NewTypedDelayingQueueWithConfig[any](config) +} + +// NewTypedDelayingQueueWithConfig constructs a new workqueue with options to +// customize different properties. +func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] { if config.Clock == nil { config.Clock = clock.RealClock{} } if config.Queue == nil { - config.Queue = NewWithConfig(QueueConfig{ + config.Queue = NewTypedWithConfig[T](TypedQueueConfig[T]{ Name: config.Name, MetricsProvider: config.MetricsProvider, Clock: config.Clock, @@ -100,9 +128,9 @@ func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) Delayi }) } -func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider MetricsProvider) *delayingType { - ret := &delayingType{ - Interface: q, +func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] { + ret := &delayingType[T]{ + TypedInterface: q, clock: clock, heartbeat: clock.NewTicker(maxWait), stopCh: make(chan struct{}), @@ -115,8 +143,8 @@ func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider } // delayingType wraps an Interface and provides delayed re-enquing -type delayingType struct { - Interface +type delayingType[T comparable] struct { + TypedInterface[T] // clock tracks time for delayed firing clock clock.Clock @@ -193,16 +221,16 @@ func (pq waitForPriorityQueue) Peek() interface{} { // ShutDown stops the queue. After the queue drains, the returned shutdown bool // on Get() will be true. This method may be invoked more than once. -func (q *delayingType) ShutDown() { +func (q *delayingType[T]) ShutDown() { q.stopOnce.Do(func() { - q.Interface.ShutDown() + q.TypedInterface.ShutDown() close(q.stopCh) q.heartbeat.Stop() }) } // AddAfter adds the given item to the work queue after the given delay -func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { +func (q *delayingType[T]) AddAfter(item T, duration time.Duration) { // don't add if we're already shutting down if q.ShuttingDown() { return @@ -229,7 +257,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { const maxWait = 10 * time.Second // waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. 
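A small sketch of the typed delaying queue on its own; the string key and the two-second delay are arbitrary. Note that the no-config constructor is named TypedNewDelayingQueue, unlike the NewTyped… pattern used by the other constructors in this package.

```go
package example

import (
	"time"

	"k8s.io/client-go/util/workqueue"
)

// retryLater re-queues a key after a delay instead of hot-looping on it.
func retryLater() {
	q := workqueue.TypedNewDelayingQueue[string]()
	defer q.ShutDown()

	// The item becomes visible to Get roughly two seconds from now.
	q.AddAfter("node/worker-1", 2*time.Second)

	key, shutdown := q.Get() // blocks until the delay has elapsed
	if shutdown {
		return
	}
	defer q.Done(key)
	_ = key // process the key here
}
```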
-func (q *delayingType) waitingLoop() { +func (q *delayingType[T]) waitingLoop() { defer utilruntime.HandleCrash() // Make a placeholder channel to use when there are no items in our list @@ -244,7 +272,7 @@ func (q *delayingType) waitingLoop() { waitingEntryByData := map[t]*waitFor{} for { - if q.Interface.ShuttingDown() { + if q.TypedInterface.ShuttingDown() { return } @@ -258,7 +286,7 @@ func (q *delayingType) waitingLoop() { } entry = heap.Pop(waitingForQueue).(*waitFor) - q.Add(entry.data) + q.Add(entry.data.(T)) delete(waitingEntryByData, entry.data) } @@ -287,7 +315,7 @@ func (q *delayingType) waitingLoop() { if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { - q.Add(waitEntry.data) + q.Add(waitEntry.data.(T)) } drained := false @@ -297,7 +325,7 @@ func (q *delayingType) waitingLoop() { if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { - q.Add(waitEntry.data) + q.Add(waitEntry.data.(T)) } default: drained = true diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index a363d1afb4..ff715482c1 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -23,18 +23,66 @@ import ( "k8s.io/utils/clock" ) -type Interface interface { - Add(item interface{}) +// Deprecated: Interface is deprecated, use TypedInterface instead. +type Interface TypedInterface[any] + +type TypedInterface[T comparable] interface { + Add(item T) Len() int - Get() (item interface{}, shutdown bool) - Done(item interface{}) + Get() (item T, shutdown bool) + Done(item T) ShutDown() ShutDownWithDrain() ShuttingDown() bool } +// Queue is the underlying storage for items. The functions below are always +// called from the same goroutine. +type Queue[T comparable] interface { + // Touch can be hooked when an existing item is added again. This may be + // useful if the implementation allows priority change for the given item. + Touch(item T) + // Push adds a new item. + Push(item T) + // Len tells the total number of items. + Len() int + // Pop retrieves an item. + Pop() (item T) +} + +// DefaultQueue is a slice based FIFO queue. +func DefaultQueue[T comparable]() Queue[T] { + return new(queue[T]) +} + +// queue is a slice which implements Queue. +type queue[T comparable] []T + +func (q *queue[T]) Touch(item T) {} + +func (q *queue[T]) Push(item T) { + *q = append(*q, item) +} + +func (q *queue[T]) Len() int { + return len(*q) +} + +func (q *queue[T]) Pop() (item T) { + item = (*q)[0] + + // The underlying array still exists and reference this object, so the object will not be garbage collected. + (*q)[0] = *new(T) + *q = (*q)[1:] + + return item +} + // QueueConfig specifies optional configurations to customize an Interface. -type QueueConfig struct { +// Deprecated: use TypedQueueConfig instead. +type QueueConfig = TypedQueueConfig[any] + +type TypedQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -44,18 +92,38 @@ type QueueConfig struct { // Clock ability to inject real or fake clock for testing purposes. Clock clock.WithTicker + + // Queue provides the underlying queue to use. It is optional and defaults to slice based FIFO queue. + Queue Queue[T] } // New constructs a new work queue (see the package comment). +// +// Deprecated: use NewTyped instead. 
func New() *Type { return NewWithConfig(QueueConfig{ Name: "", }) } +// NewTyped constructs a new work queue (see the package comment). +func NewTyped[T comparable]() *Typed[T] { + return NewTypedWithConfig(TypedQueueConfig[T]{ + Name: "", + }) +} + // NewWithConfig constructs a new workqueue with ability to // customize different properties. +// +// Deprecated: use NewTypedWithConfig instead. func NewWithConfig(config QueueConfig) *Type { + return NewTypedWithConfig(config) +} + +// NewTypedWithConfig constructs a new workqueue with ability to +// customize different properties. +func NewTypedWithConfig[T comparable](config TypedQueueConfig[T]) *Typed[T] { return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod) } @@ -69,7 +137,7 @@ func NewNamed(name string) *Type { // newQueueWithConfig constructs a new named workqueue // with the ability to customize different properties for testing purposes -func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { +func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] { var metricsFactory *queueMetricsFactory if config.MetricsProvider != nil { metricsFactory = &queueMetricsFactory{ @@ -83,18 +151,24 @@ func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { config.Clock = clock.RealClock{} } + if config.Queue == nil { + config.Queue = DefaultQueue[T]() + } + return newQueue( config.Clock, + config.Queue, metricsFactory.newQueueMetrics(config.Name, config.Clock), updatePeriod, ) } -func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type { - t := &Type{ +func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics, updatePeriod time.Duration) *Typed[T] { + t := &Typed[T]{ clock: c, - dirty: set{}, - processing: set{}, + queue: queue, + dirty: set[T]{}, + processing: set[T]{}, cond: sync.NewCond(&sync.Mutex{}), metrics: metrics, unfinishedWorkUpdatePeriod: updatePeriod, @@ -112,20 +186,23 @@ func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Durati const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond // Type is a work queue (see the package comment). -type Type struct { +// Deprecated: Use Typed instead. +type Type = Typed[any] + +type Typed[t comparable] struct { // queue defines the order in which we will work on items. Every // element of queue should be in the dirty set and not in the // processing set. - queue []t + queue Queue[t] // dirty defines all of the items that need to be processed. - dirty set + dirty set[t] // Things that are currently being processed are in the processing set. // These things may be simultaneously in the dirty set. When we finish // processing something and remove it from this set, we'll check if // it's in the dirty set, and if so, add it to the queue. - processing set + processing set[t] cond *sync.Cond @@ -140,33 +217,38 @@ type Type struct { type empty struct{} type t interface{} -type set map[t]empty +type set[t comparable] map[t]empty -func (s set) has(item t) bool { +func (s set[t]) has(item t) bool { _, exists := s[item] return exists } -func (s set) insert(item t) { +func (s set[t]) insert(item t) { s[item] = empty{} } -func (s set) delete(item t) { +func (s set[t]) delete(item t) { delete(s, item) } -func (s set) len() int { +func (s set[t]) len() int { return len(s) } // Add marks item as needing processing. 
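The new Queue[T] interface is the extension point that lets callers swap the default slice-based FIFO for their own ordering. A toy sketch that assumes nothing beyond the interface shown above; the LIFO policy and the names are invented:

```go
package example

import "k8s.io/client-go/util/workqueue"

// lifo is a toy Queue[T] that pops the most recently pushed item first.
type lifo[T comparable] []T

func (q *lifo[T]) Touch(item T) {} // no priority to refresh in this sketch
func (q *lifo[T]) Push(item T)  { *q = append(*q, item) }
func (q *lifo[T]) Len() int     { return len(*q) }
func (q *lifo[T]) Pop() (item T) {
	last := len(*q) - 1
	item = (*q)[last]
	(*q)[last] = *new(T) // drop the reference so it can be garbage collected
	*q = (*q)[:last]
	return item
}

// newLIFOWorkqueue injects the toy queue through TypedQueueConfig.
func newLIFOWorkqueue() *workqueue.Typed[string] {
	return workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
		Name:  "example-lifo",
		Queue: new(lifo[string]),
	})
}
```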
-func (q *Type) Add(item interface{}) { +func (q *Typed[T]) Add(item T) { q.cond.L.Lock() defer q.cond.L.Unlock() if q.shuttingDown { return } if q.dirty.has(item) { + // the same item is added again before it is processed, call the Touch + // function if the queue cares about it (for e.g, reset its priority) + if !q.processing.has(item) { + q.queue.Touch(item) + } return } @@ -177,37 +259,34 @@ func (q *Type) Add(item interface{}) { return } - q.queue = append(q.queue, item) + q.queue.Push(item) q.cond.Signal() } // Len returns the current queue length, for informational purposes only. You // shouldn't e.g. gate a call to Add() or Get() on Len() being a particular // value, that can't be synchronized properly. -func (q *Type) Len() int { +func (q *Typed[T]) Len() int { q.cond.L.Lock() defer q.cond.L.Unlock() - return len(q.queue) + return q.queue.Len() } // Get blocks until it can return an item to be processed. If shutdown = true, // the caller should end their goroutine. You must call Done with item when you // have finished processing it. -func (q *Type) Get() (item interface{}, shutdown bool) { +func (q *Typed[T]) Get() (item T, shutdown bool) { q.cond.L.Lock() defer q.cond.L.Unlock() - for len(q.queue) == 0 && !q.shuttingDown { + for q.queue.Len() == 0 && !q.shuttingDown { q.cond.Wait() } - if len(q.queue) == 0 { + if q.queue.Len() == 0 { // We must be shutting down. - return nil, true + return *new(T), true } - item = q.queue[0] - // The underlying array still exists and reference this object, so the object will not be garbage collected. - q.queue[0] = nil - q.queue = q.queue[1:] + item = q.queue.Pop() q.metrics.get(item) @@ -220,7 +299,7 @@ func (q *Type) Get() (item interface{}, shutdown bool) { // Done marks item as done processing, and if it has been marked as dirty again // while it was being processed, it will be re-added to the queue for // re-processing. -func (q *Type) Done(item interface{}) { +func (q *Typed[T]) Done(item T) { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -228,7 +307,7 @@ func (q *Type) Done(item interface{}) { q.processing.delete(item) if q.dirty.has(item) { - q.queue = append(q.queue, item) + q.queue.Push(item) q.cond.Signal() } else if q.processing.len() == 0 { q.cond.Signal() @@ -237,7 +316,7 @@ func (q *Type) Done(item interface{}) { // ShutDown will cause q to ignore all new items added to it and // immediately instruct the worker goroutines to exit. -func (q *Type) ShutDown() { +func (q *Typed[T]) ShutDown() { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -255,7 +334,7 @@ func (q *Type) ShutDown() { // indefinitely. It is, however, safe to call ShutDown after having called // ShutDownWithDrain, as to force the queue shut down to terminate immediately // without waiting for the drainage. 
-func (q *Type) ShutDownWithDrain() { +func (q *Typed[T]) ShutDownWithDrain() { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -268,14 +347,14 @@ func (q *Type) ShutDownWithDrain() { } } -func (q *Type) ShuttingDown() bool { +func (q *Typed[T]) ShuttingDown() bool { q.cond.L.Lock() defer q.cond.L.Unlock() return q.shuttingDown } -func (q *Type) updateUnfinishedWorkLoop() { +func (q *Typed[T]) updateUnfinishedWorkLoop() { t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod) defer t.Stop() for range t.C() { diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go index 3e4016fb04..fe45afa5a4 100644 --- a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -19,24 +19,33 @@ package workqueue import "k8s.io/utils/clock" // RateLimitingInterface is an interface that rate limits items being added to the queue. -type RateLimitingInterface interface { - DelayingInterface +// +// Deprecated: Use TypedRateLimitingInterface instead. +type RateLimitingInterface TypedRateLimitingInterface[any] + +// TypedRateLimitingInterface is an interface that rate limits items being added to the queue. +type TypedRateLimitingInterface[T comparable] interface { + TypedDelayingInterface[T] // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok - AddRateLimited(item interface{}) + AddRateLimited(item T) // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you // still have to call `Done` on the queue. - Forget(item interface{}) + Forget(item T) // NumRequeues returns back how many times the item was requeued - NumRequeues(item interface{}) int + NumRequeues(item T) int } // RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface. +// +// Deprecated: Use TypedRateLimitingQueueConfig instead. +type RateLimitingQueueConfig = TypedRateLimitingQueueConfig[any] -type RateLimitingQueueConfig struct { +// TypedRateLimitingQueueConfig specifies optional configurations to customize a TypedRateLimitingInterface. +type TypedRateLimitingQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -48,36 +57,55 @@ type RateLimitingQueueConfig struct { Clock clock.WithTicker // DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one. - DelayingQueue DelayingInterface + DelayingQueue TypedDelayingInterface[T] } // NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability // Remember to call Forget! If you don't, you may end up tracking failures forever. // NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use // NewRateLimitingQueueWithConfig instead and specify a name. +// +// Deprecated: Use NewTypedRateLimitingQueue instead. func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{}) } +// NewTypedRateLimitingQueue constructs a new workqueue with rateLimited queuing ability +// Remember to call Forget! If you don't, you may end up tracking failures forever. +// NewTypedRateLimitingQueue does not emit metrics. 
For use with a MetricsProvider, please use +// NewTypedRateLimitingQueueWithConfig instead and specify a name. +func NewTypedRateLimitingQueue[T comparable](rateLimiter TypedRateLimiter[T]) TypedRateLimitingInterface[T] { + return NewTypedRateLimitingQueueWithConfig(rateLimiter, TypedRateLimitingQueueConfig[T]{}) +} + // NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability // with options to customize different properties. // Remember to call Forget! If you don't, you may end up tracking failures forever. +// +// Deprecated: Use NewTypedRateLimitingQueueWithConfig instead. func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface { + return NewTypedRateLimitingQueueWithConfig(rateLimiter, config) +} + +// NewTypedRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability +// with options to customize different properties. +// Remember to call Forget! If you don't, you may end up tracking failures forever. +func NewTypedRateLimitingQueueWithConfig[T comparable](rateLimiter TypedRateLimiter[T], config TypedRateLimitingQueueConfig[T]) TypedRateLimitingInterface[T] { if config.Clock == nil { config.Clock = clock.RealClock{} } if config.DelayingQueue == nil { - config.DelayingQueue = NewDelayingQueueWithConfig(DelayingQueueConfig{ + config.DelayingQueue = NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{ Name: config.Name, MetricsProvider: config.MetricsProvider, Clock: config.Clock, }) } - return &rateLimitingType{ - DelayingInterface: config.DelayingQueue, - rateLimiter: rateLimiter, + return &rateLimitingType[T]{ + TypedDelayingInterface: config.DelayingQueue, + rateLimiter: rateLimiter, } } @@ -99,21 +127,21 @@ func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter } // rateLimitingType wraps an Interface and provides rateLimited re-enquing -type rateLimitingType struct { - DelayingInterface +type rateLimitingType[T comparable] struct { + TypedDelayingInterface[T] - rateLimiter RateLimiter + rateLimiter TypedRateLimiter[T] } // AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok -func (q *rateLimitingType) AddRateLimited(item interface{}) { - q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) +func (q *rateLimitingType[T]) AddRateLimited(item T) { + q.TypedDelayingInterface.AddAfter(item, q.rateLimiter.When(item)) } -func (q *rateLimitingType) NumRequeues(item interface{}) int { +func (q *rateLimitingType[T]) NumRequeues(item T) int { return q.rateLimiter.NumRequeues(item) } -func (q *rateLimitingType) Forget(item interface{}) { +func (q *rateLimitingType[T]) Forget(item T) { q.rateLimiter.Forget(item) } diff --git a/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go b/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go index bd2cf5f875..728fa520b6 100644 --- a/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go +++ b/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go @@ -33,8 +33,9 @@ import ( // while still allowing the distribution of key-value pairs across multiple flag invocations. 
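The typed rate-limiting queue is usually consumed from a worker loop like the sketch below. processNext, handler, and maxRetries are invented names; the queue itself would be created with, for example, workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]()).

```go
package example

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// processNext handles one key from the queue, retrying failures with back-off
// up to maxRetries times. It returns false once the queue is shutting down.
func processNext(q workqueue.TypedRateLimitingInterface[string], handler func(string) error, maxRetries int) bool {
	key, shutdown := q.Get()
	if shutdown {
		return false
	}
	defer q.Done(key)

	if err := handler(key); err != nil {
		if q.NumRequeues(key) < maxRetries {
			q.AddRateLimited(key) // re-queue after the rate limiter's delay
			return true
		}
		q.Forget(key)
		fmt.Printf("dropping %q after %d retries: %v\n", key, maxRetries, err)
		return true
	}

	// Success: stop tracking the key in the rate limiter.
	q.Forget(key)
	return true
}
```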
// For example: `--flag "a:hello" --flag "b:again" --flag "b:beautiful" --flag "c:world"` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}` type ColonSeparatedMultimapStringString struct { - Multimap *map[string][]string - initialized bool // set to true after the first Set call + Multimap *map[string][]string + initialized bool // set to true after the first Set call + allowDefaultEmptyKey bool } // NewColonSeparatedMultimapStringString takes a pointer to a map[string][]string and returns the @@ -43,6 +44,12 @@ func NewColonSeparatedMultimapStringString(m *map[string][]string) *ColonSeparat return &ColonSeparatedMultimapStringString{Multimap: m} } +// NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey takes a pointer to a map[string][]string and returns the +// ColonSeparatedMultimapStringString flag parsing shim for that map. It allows default empty key with no colon in the flag. +func NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey(m *map[string][]string) *ColonSeparatedMultimapStringString { + return &ColonSeparatedMultimapStringString{Multimap: m, allowDefaultEmptyKey: true} +} + // Set implements github.com/spf13/pflag.Value func (m *ColonSeparatedMultimapStringString) Set(value string) error { if m.Multimap == nil { @@ -58,11 +65,16 @@ func (m *ColonSeparatedMultimapStringString) Set(value string) error { continue } kv := strings.SplitN(pair, ":", 2) - if len(kv) != 2 { - return fmt.Errorf("malformed pair, expect string:string") + var k, v string + if m.allowDefaultEmptyKey && len(kv) == 1 { + v = strings.TrimSpace(kv[0]) + } else { + if len(kv) != 2 { + return fmt.Errorf("malformed pair, expect string:string") + } + k = strings.TrimSpace(kv[0]) + v = strings.TrimSpace(kv[1]) } - k := strings.TrimSpace(kv[0]) - v := strings.TrimSpace(kv[1]) (*m.Multimap)[k] = append((*m.Multimap)[k], v) } return nil diff --git a/vendor/k8s.io/component-base/featuregate/feature_gate.go b/vendor/k8s.io/component-base/featuregate/feature_gate.go index 1e441289ea..b6f08a6cd6 100644 --- a/vendor/k8s.io/component-base/featuregate/feature_gate.go +++ b/vendor/k8s.io/component-base/featuregate/feature_gate.go @@ -19,6 +19,7 @@ package featuregate import ( "context" "fmt" + "reflect" "sort" "strconv" "strings" @@ -27,8 +28,12 @@ import ( "github.com/spf13/pflag" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/naming" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/version" featuremetrics "k8s.io/component-base/metrics/prometheus/feature" + baseversion "k8s.io/component-base/version" "k8s.io/klog/v2" ) @@ -52,13 +57,13 @@ const ( var ( // The generic features. - defaultFeatures = map[Feature]FeatureSpec{ - allAlphaGate: {Default: false, PreRelease: Alpha}, - allBetaGate: {Default: false, PreRelease: Beta}, + defaultFeatures = map[Feature]VersionedSpecs{ + allAlphaGate: {{Default: false, PreRelease: Alpha, Version: version.MajorMinor(0, 0)}}, + allBetaGate: {{Default: false, PreRelease: Beta, Version: version.MajorMinor(0, 0)}}, } // Special handling for a few gates. 
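The relaxed parsing added above behaves as in this small sketch; parseMirrors and the flag values are illustrative only.

```go
package example

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

// parseMirrors shows the AllowDefaultEmptyKey variant: values with no colon
// are collected under the empty key "" instead of being rejected.
func parseMirrors() error {
	m := map[string][]string{}
	v := cliflag.NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey(&m)

	if err := v.Set("a:hello,b:again,no-colon"); err != nil {
		return err
	}
	fmt.Println(m["a"]) // [hello]
	fmt.Println(m[""])  // [no-colon]
	return nil
}
```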
- specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){ + specialFeatures = map[Feature]func(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version){ allAlphaGate: setUnsetAlphaGates, allBetaGate: setUnsetBetaGates, } @@ -69,13 +74,28 @@ type FeatureSpec struct { Default bool // LockToDefault indicates that the feature is locked to its default and cannot be changed LockToDefault bool - // PreRelease indicates the maturity level of the feature + // PreRelease indicates the current maturity level of the feature PreRelease prerelease + // Version indicates the earliest version from which this FeatureSpec is valid. + // If multiple FeatureSpecs exist for a Feature, the one with the highest version that is less + // than or equal to the effective version of the component is used. + Version *version.Version } +type VersionedSpecs []FeatureSpec + +func (g VersionedSpecs) Len() int { return len(g) } +func (g VersionedSpecs) Less(i, j int) bool { + return g[i].Version.LessThan(g[j].Version) +} +func (g VersionedSpecs) Swap(i, j int) { g[i], g[j] = g[j], g[i] } + +type PromotionVersionMapping map[prerelease]string + type prerelease string const ( + PreAlpha = prerelease("PRE-ALPHA") // Values for PreRelease. Alpha = prerelease("ALPHA") Beta = prerelease("BETA") @@ -94,7 +114,9 @@ type FeatureGate interface { // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be // set on the copy without mutating the original. This is useful for validating // config against potential feature gate changes before committing those changes. - DeepCopy() MutableFeatureGate + DeepCopy() MutableVersionedFeatureGate + // Validate checks if the flag gates are valid at the emulated version. + Validate() []error } // MutableFeatureGate parses and stores flag gates for known features from @@ -104,6 +126,8 @@ type MutableFeatureGate interface { // AddFlag adds a flag for setting global feature gates to the specified FlagSet. AddFlag(fs *pflag.FlagSet) + // Close sets closed to true, and prevents subsequent calls to Add + Close() // Set parses and stores flag gates for known features // from a string like feature1=true,feature2=false,... Set(value string) error @@ -128,25 +152,77 @@ type MutableFeatureGate interface { OverrideDefault(name Feature, override bool) error } +// MutableVersionedFeatureGate parses and stores flag gates for known features from +// a string like feature1=true,feature2=false,... +// MutableVersionedFeatureGate sets options based on the emulated version of the featured gate. +type MutableVersionedFeatureGate interface { + MutableFeatureGate + // EmulationVersion returns the version the feature gate is set to emulate. + // If set, the feature gate would enable/disable features based on + // feature availability and pre-release at the emulated version instead of the binary version. + EmulationVersion() *version.Version + // SetEmulationVersion overrides the emulationVersion of the feature gate. + // Otherwise, the emulationVersion will be the same as the binary version. + // If set, the feature defaults and availability will be as if the binary is at the emulated version. + SetEmulationVersion(emulationVersion *version.Version) error + // GetAll returns a copy of the map of known feature names to versioned feature specs. + GetAllVersioned() map[Feature]VersionedSpecs + // AddVersioned adds versioned feature specs to the featureGate. 
+ AddVersioned(features map[Feature]VersionedSpecs) error + // OverrideDefaultAtVersion sets a local override for the registered default value of a named + // feature for the prerelease lifecycle the given version is at. + // If the feature has not been previously registered (e.g. by a call to Add), + // has a locked default, or if the gate has already registered itself with a FlagSet, a non-nil + // error is returned. + // + // When two or more components consume a common feature, one component can override its + // default at runtime in order to adopt new defaults before or after the other + // components. For example, a new feature can be evaluated with a limited blast radius by + // overriding its default to true for a limited number of components without simultaneously + // changing its default for all consuming components. + OverrideDefaultAtVersion(name Feature, override bool, ver *version.Version) error + // ExplicitlySet returns true if the feature value is explicitly set instead of + // being derived from the default values or special features. + ExplicitlySet(name Feature) bool + // ResetFeatureValueToDefault resets the value of the feature back to the default value. + ResetFeatureValueToDefault(name Feature) error + // DeepCopyAndReset copies all the registered features of the FeatureGate object, with all the known features and overrides, + // and resets all the enabled status of the new feature gate. + // This is useful for creating a new instance of feature gate without inheriting all the enabled configurations of the base feature gate. + DeepCopyAndReset() MutableVersionedFeatureGate +} + // featureGate implements FeatureGate as well as pflag.Value for flag parsing. type featureGate struct { featureGateName string - special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool) + special map[Feature]func(map[Feature]VersionedSpecs, map[Feature]bool, bool, *version.Version) - // lock guards writes to known, enabled, and reads/writes of closed + // lock guards writes to all below fields. lock sync.Mutex // known holds a map[Feature]FeatureSpec known atomic.Value // enabled holds a map[Feature]bool enabled atomic.Value + // enabledRaw holds a raw map[string]bool of the parsed flag. + // It keeps the original values of "special" features like "all alpha gates", + // while enabled keeps the values of all resolved features. + enabledRaw atomic.Value // closed is set to true when AddFlag is called, and prevents subsequent calls to Add closed bool + // queriedFeatures stores all the features that have been queried through the Enabled interface. + // It is reset when SetEmulationVersion is called. 
+ queriedFeatures atomic.Value + emulationVersion atomic.Pointer[version.Version] } -func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { +func setUnsetAlphaGates(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version) { for k, v := range known { - if v.PreRelease == Alpha { + if k == "AllAlpha" || k == "AllBeta" { + continue + } + featureSpec := featureSpecAtEmulationVersion(v, cVer) + if featureSpec.PreRelease == Alpha { if _, found := enabled[k]; !found { enabled[k] = val } @@ -154,9 +230,13 @@ func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, } } -func setUnsetBetaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { +func setUnsetBetaGates(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version) { for k, v := range known { - if v.PreRelease == Beta { + if k == "AllAlpha" || k == "AllBeta" { + continue + } + featureSpec := featureSpecAtEmulationVersion(v, cVer) + if featureSpec.PreRelease == Beta { if _, found := enabled[k]; !found { enabled[k] = val } @@ -171,8 +251,10 @@ var _ pflag.Value = &featureGate{} // call chains, so they'd be unhelpful as names. var internalPackages = []string{"k8s.io/component-base/featuregate/feature_gate.go"} -func NewFeatureGate() *featureGate { - known := map[Feature]FeatureSpec{} +// NewVersionedFeatureGate creates a feature gate with the emulation version set to the provided version. +// SetEmulationVersion can be called after to change emulation version to a desired value. +func NewVersionedFeatureGate(emulationVersion *version.Version) *featureGate { + known := map[Feature]VersionedSpecs{} for k, v := range defaultFeatures { known[k] = v } @@ -183,10 +265,19 @@ func NewFeatureGate() *featureGate { } f.known.Store(known) f.enabled.Store(map[Feature]bool{}) - + f.enabledRaw.Store(map[string]bool{}) + f.emulationVersion.Store(emulationVersion) + f.queriedFeatures.Store(sets.Set[Feature]{}) + klog.V(1).Infof("new feature gate with emulationVersion=%s", f.emulationVersion.Load().String()) return f } +// NewFeatureGate creates a feature gate with the current binary version. +func NewFeatureGate() *featureGate { + binaryVersison := version.MustParse(baseversion.DefaultKubeBinaryVersion) + return NewVersionedFeatureGate(binaryVersison) +} + // Set parses a string of the form "key1=value1,key2=value2,..." into a // map[string]bool of known keys or returns an error. func (f *featureGate) Set(value string) error { @@ -210,35 +301,52 @@ func (f *featureGate) Set(value string) error { return f.SetFromMap(m) } -// SetFromMap stores flag gates for known features from a map[string]bool or returns an error -func (f *featureGate) SetFromMap(m map[string]bool) error { +// Validate checks if the flag gates are valid at the emulated version. +func (f *featureGate) Validate() []error { f.lock.Lock() defer f.lock.Unlock() + m, ok := f.enabledRaw.Load().(map[string]bool) + if !ok { + return []error{fmt.Errorf("cannot cast enabledRaw to map[string]bool")} + } + enabled := map[Feature]bool{} + return f.unsafeSetFromMap(enabled, m, f.EmulationVersion()) +} +// unsafeSetFromMap stores flag gates for known features from a map[string]bool into an enabled map. 
+func (f *featureGate) unsafeSetFromMap(enabled map[Feature]bool, m map[string]bool, emulationVersion *version.Version) []error { + var errs []error // Copy existing state - known := map[Feature]FeatureSpec{} - for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known := map[Feature]VersionedSpecs{} + for k, v := range f.known.Load().(map[Feature]VersionedSpecs) { + sort.Sort(v) known[k] = v } - enabled := map[Feature]bool{} - for k, v := range f.enabled.Load().(map[Feature]bool) { - enabled[k] = v - } for k, v := range m { - k := Feature(k) - featureSpec, ok := known[k] + key := Feature(k) + versionedSpecs, ok := known[key] if !ok { - return fmt.Errorf("unrecognized feature gate: %s", k) + // early return if encounters an unknown feature. + errs = append(errs, fmt.Errorf("unrecognized feature gate: %s", k)) + return errs } + featureSpec := featureSpecAtEmulationVersion(versionedSpecs, emulationVersion) if featureSpec.LockToDefault && featureSpec.Default != v { - return fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", k, v, featureSpec.Default) + errs = append(errs, fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", k, v, featureSpec.Default)) + continue } - enabled[k] = v // Handle "special" features like "all alpha gates" - if fn, found := f.special[k]; found { - fn(known, enabled, v) + if fn, found := f.special[key]; found { + fn(known, enabled, v, emulationVersion) + enabled[key] = v + continue + } + if featureSpec.PreRelease == PreAlpha { + errs = append(errs, fmt.Errorf("cannot set feature gate %v to %v, feature is PreAlpha at emulated version %s", k, v, emulationVersion.String())) + continue } + enabled[key] = v if featureSpec.PreRelease == Deprecated { klog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v) @@ -246,13 +354,39 @@ func (f *featureGate) SetFromMap(m map[string]bool) error { klog.Warningf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v) } } + return errs +} - // Persist changes - f.known.Store(known) - f.enabled.Store(enabled) +// SetFromMap stores flag gates for known features from a map[string]bool or returns an error +func (f *featureGate) SetFromMap(m map[string]bool) error { + f.lock.Lock() + defer f.lock.Unlock() - klog.V(1).Infof("feature gates: %v", f.enabled) - return nil + // Copy existing state + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } + + // Update enabledRaw first. + // SetFromMap might be called when emulationVersion is not finalized yet, and we do not know the final state of enabled. + // But the flags still need to be saved. + for k, v := range m { + enabledRaw[k] = v + } + f.enabledRaw.Store(enabledRaw) + + errs := f.unsafeSetFromMap(enabled, enabledRaw, f.EmulationVersion()) + if len(errs) == 0 { + // Persist changes + f.enabled.Store(enabled) + klog.V(1).Infof("feature gates: %v", f.enabled) + } + return utilerrors.NewAggregate(errs) } // String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...". @@ -271,6 +405,17 @@ func (f *featureGate) Type() string { // Add adds features to the featureGate. 
func (f *featureGate) Add(features map[Feature]FeatureSpec) error { + vs := map[Feature]VersionedSpecs{} + for name, spec := range features { + // if no version is provided for the FeatureSpec, it is defaulted to version 0.0 so that it can be enabled/disabled regardless of emulation version. + spec.Version = version.MajorMinor(0, 0) + vs[name] = VersionedSpecs{spec} + } + return f.AddVersioned(vs) +} + +// AddVersioned adds versioned feature specs to the featureGate. +func (f *featureGate) AddVersioned(features map[Feature]VersionedSpecs) error { f.lock.Lock() defer f.lock.Unlock() @@ -279,20 +424,18 @@ func (f *featureGate) Add(features map[Feature]FeatureSpec) error { } // Copy existing state - known := map[Feature]FeatureSpec{} - for k, v := range f.known.Load().(map[Feature]FeatureSpec) { - known[k] = v - } + known := f.GetAllVersioned() - for name, spec := range features { + for name, specs := range features { + sort.Sort(specs) if existingSpec, found := known[name]; found { - if existingSpec == spec { + sort.Sort(existingSpec) + if reflect.DeepEqual(existingSpec, specs) { continue } return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec) } - - known[name] = spec + known[name] = specs } // Persist updated state @@ -302,6 +445,10 @@ func (f *featureGate) Add(features map[Feature]FeatureSpec) error { } func (f *featureGate) OverrideDefault(name Feature, override bool) error { + return f.OverrideDefaultAtVersion(name, override, f.EmulationVersion()) +} + +func (f *featureGate) OverrideDefaultAtVersion(name Feature, override bool, ver *version.Version) error { f.lock.Lock() defer f.lock.Unlock() @@ -309,17 +456,19 @@ func (f *featureGate) OverrideDefault(name Feature, override bool) error { return fmt.Errorf("cannot override default for feature %q: gates already added to a flag set", name) } - known := map[Feature]FeatureSpec{} - for name, spec := range f.known.Load().(map[Feature]FeatureSpec) { - known[name] = spec - } + // Copy existing state + known := f.GetAllVersioned() - spec, ok := known[name] - switch { - case !ok: + specs, ok := known[name] + if !ok { return fmt.Errorf("cannot override default: feature %q is not registered", name) + } + spec := featureSpecAtEmulationVersion(specs, ver) + switch { case spec.LockToDefault: return fmt.Errorf("cannot override default: feature %q default is locked to %t", name, spec.Default) + case spec.PreRelease == PreAlpha: + return fmt.Errorf("cannot override default: feature %q is not available before version %s", name, ver.String()) case spec.PreRelease == Deprecated: klog.Warningf("Overriding default of deprecated feature gate %s=%t. It will be removed in a future release.", name, override) case spec.PreRelease == GA: @@ -327,42 +476,155 @@ func (f *featureGate) OverrideDefault(name Feature, override bool) error { } spec.Default = override - known[name] = spec + known[name] = specs f.known.Store(known) return nil } -// GetAll returns a copy of the map of known feature names to feature specs. +// GetAll returns a copy of the map of known feature names to feature specs for the current emulationVersion. 
func (f *featureGate) GetAll() map[Feature]FeatureSpec { retval := map[Feature]FeatureSpec{} - for k, v := range f.known.Load().(map[Feature]FeatureSpec) { - retval[k] = v + f.lock.Lock() + versionedSpecs := f.GetAllVersioned() + emuVer := f.EmulationVersion() + f.lock.Unlock() + for k, v := range versionedSpecs { + spec := featureSpecAtEmulationVersion(v, emuVer) + if spec.PreRelease == PreAlpha { + // The feature is not available at the emulation version. + continue + } + retval[k] = *spec } return retval } +// GetAllVersioned returns a copy of the map of known feature names to versioned feature specs. +func (f *featureGate) GetAllVersioned() map[Feature]VersionedSpecs { + retval := map[Feature]VersionedSpecs{} + for k, v := range f.known.Load().(map[Feature]VersionedSpecs) { + vCopy := make([]FeatureSpec, len(v)) + _ = copy(vCopy, v) + retval[k] = vCopy + } + return retval +} + +func (f *featureGate) SetEmulationVersion(emulationVersion *version.Version) error { + if emulationVersion.EqualTo(f.EmulationVersion()) { + return nil + } + f.lock.Lock() + defer f.lock.Unlock() + klog.V(1).Infof("set feature gate emulationVersion to %s", emulationVersion.String()) + + // Copy existing state + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } + // enabled map should be reset whenever emulationVersion is changed. + enabled := map[Feature]bool{} + errs := f.unsafeSetFromMap(enabled, enabledRaw, emulationVersion) + + queriedFeatures := f.queriedFeatures.Load().(sets.Set[Feature]) + known := f.known.Load().(map[Feature]VersionedSpecs) + for feature := range queriedFeatures { + newVal := featureEnabled(feature, enabled, known, emulationVersion) + oldVal := featureEnabled(feature, f.enabled.Load().(map[Feature]bool), known, f.EmulationVersion()) + if newVal != oldVal { + klog.Warningf("SetEmulationVersion will change already queried feature:%s from %v to %v", feature, oldVal, newVal) + } + } + + if len(errs) == 0 { + // Persist changes + f.enabled.Store(enabled) + f.emulationVersion.Store(emulationVersion) + f.queriedFeatures.Store(sets.Set[Feature]{}) + } + return utilerrors.NewAggregate(errs) +} + +func (f *featureGate) EmulationVersion() *version.Version { + return f.emulationVersion.Load() +} + +// featureSpec returns the featureSpec at the EmulationVersion if the key exists, an error otherwise. +// This is useful to keep multiple implementations of a feature based on the PreRelease or Version info. 
+func (f *featureGate) featureSpec(key Feature) (FeatureSpec, error) { + if v, ok := f.known.Load().(map[Feature]VersionedSpecs)[key]; ok { + featureSpec := f.featureSpecAtEmulationVersion(v) + return *featureSpec, nil + } + return FeatureSpec{}, fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName) +} + +func (f *featureGate) unsafeRecordQueried(key Feature) { + queriedFeatures := f.queriedFeatures.Load().(sets.Set[Feature]) + if _, ok := queriedFeatures[key]; ok { + return + } + // Clone items from queriedFeatures before mutating it + newQueriedFeatures := queriedFeatures.Clone() + newQueriedFeatures.Insert(key) + f.queriedFeatures.Store(newQueriedFeatures) +} + +func featureEnabled(key Feature, enabled map[Feature]bool, known map[Feature]VersionedSpecs, emulationVersion *version.Version) bool { + // check explicitly set enabled list + if v, ok := enabled[key]; ok { + return v + } + if v, ok := known[key]; ok { + return featureSpecAtEmulationVersion(v, emulationVersion).Default + } + + panic(fmt.Errorf("feature %q is not registered in FeatureGate", key)) +} + // Enabled returns true if the key is enabled. If the key is not known, this call will panic. func (f *featureGate) Enabled(key Feature) bool { - if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok { - return v + // TODO: ideally we should lock the feature gate in this call to be safe, need to evaluate how much performance impact locking would have. + v := featureEnabled(key, f.enabled.Load().(map[Feature]bool), f.known.Load().(map[Feature]VersionedSpecs), f.EmulationVersion()) + f.unsafeRecordQueried(key) + return v +} + +func (f *featureGate) featureSpecAtEmulationVersion(v VersionedSpecs) *FeatureSpec { + return featureSpecAtEmulationVersion(v, f.EmulationVersion()) +} + +func featureSpecAtEmulationVersion(v VersionedSpecs, emulationVersion *version.Version) *FeatureSpec { + i := len(v) - 1 + for ; i >= 0; i-- { + if v[i].Version.GreaterThan(emulationVersion) { + continue + } + return &v[i] } - if v, ok := f.known.Load().(map[Feature]FeatureSpec)[key]; ok { - return v.Default + return &FeatureSpec{ + Default: false, + PreRelease: PreAlpha, + Version: version.MajorMinor(0, 0), } +} - panic(fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName)) +// Close sets closed to true, and prevents subsequent calls to Add +func (f *featureGate) Close() { + f.lock.Lock() + f.closed = true + f.lock.Unlock() } // AddFlag adds a flag for setting global feature gates to the specified FlagSet. func (f *featureGate) AddFlag(fs *pflag.FlagSet) { - f.lock.Lock() // TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead? // Not all components expose a feature gates flag using this AddFlag method, and // in the future, all components will completely stop exposing a feature gates flag, // in favor of componentconfig. - f.closed = true - f.lock.Unlock() + f.Close() known := f.KnownFeatures() fs.Var(f, flagName, ""+ @@ -377,32 +639,50 @@ func (f *featureGate) AddMetrics() { } // KnownFeatures returns a slice of strings describing the FeatureGate's known features. -// Deprecated and GA features are hidden from the list. +// preAlpha, Deprecated and GA features are hidden from the list. 
func (f *featureGate) KnownFeatures() []string { var known []string - for k, v := range f.known.Load().(map[Feature]FeatureSpec) { - if v.PreRelease == GA || v.PreRelease == Deprecated { + for k, v := range f.known.Load().(map[Feature]VersionedSpecs) { + if k == "AllAlpha" || k == "AllBeta" { + known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v[0].PreRelease, v[0].Default)) + continue + } + featureSpec := f.featureSpecAtEmulationVersion(v) + if featureSpec.PreRelease == GA || featureSpec.PreRelease == Deprecated || featureSpec.PreRelease == PreAlpha { continue } - known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v.PreRelease, v.Default)) + known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, featureSpec.PreRelease, featureSpec.Default)) } sort.Strings(known) return known } +// DeepCopyAndReset copies all the registered features of the FeatureGate object, with all the known features and overrides, +// and resets all the enabled status of the new feature gate. +// This is useful for creating a new instance of feature gate without inheriting all the enabled configurations of the base feature gate. +func (f *featureGate) DeepCopyAndReset() MutableVersionedFeatureGate { + fg := NewVersionedFeatureGate(f.EmulationVersion()) + known := f.GetAllVersioned() + fg.known.Store(known) + return fg +} + // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be // set on the copy without mutating the original. This is useful for validating // config against potential feature gate changes before committing those changes. -func (f *featureGate) DeepCopy() MutableFeatureGate { +func (f *featureGate) DeepCopy() MutableVersionedFeatureGate { + f.lock.Lock() + defer f.lock.Unlock() // Copy existing state. - known := map[Feature]FeatureSpec{} - for k, v := range f.known.Load().(map[Feature]FeatureSpec) { - known[k] = v - } + known := f.GetAllVersioned() enabled := map[Feature]bool{} for k, v := range f.enabled.Load().(map[Feature]bool) { enabled[k] = v } + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } // Construct a new featureGate around the copied state. // Note that specialFeatures is treated as immutable by convention, @@ -411,9 +691,48 @@ func (f *featureGate) DeepCopy() MutableFeatureGate { special: specialFeatures, closed: f.closed, } - + fg.emulationVersion.Store(f.EmulationVersion()) fg.known.Store(known) fg.enabled.Store(enabled) - + fg.enabledRaw.Store(enabledRaw) + fg.queriedFeatures.Store(sets.Set[Feature]{}) return fg } + +// ExplicitlySet returns true if the feature value is explicitly set instead of +// being derived from the default values or special features. +func (f *featureGate) ExplicitlySet(name Feature) bool { + enabledRaw := f.enabledRaw.Load().(map[string]bool) + _, ok := enabledRaw[string(name)] + return ok +} + +// ResetFeatureValueToDefault resets the value of the feature back to the default value. 
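A sketch of how the versioned feature-gate API above fits together; MyFeature and its version thresholds are invented for illustration. At emulation version 1.29 the gate resolves MyFeature to its 1.28 spec (Alpha, default off) even though the binary version is 1.31.

```go
package example

import (
	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/component-base/featuregate"
)

// MyFeature is a hypothetical feature used only for this sketch.
const MyFeature featuregate.Feature = "MyFeature"

func newGateEmulating129() (featuregate.MutableVersionedFeatureGate, error) {
	// The gate starts out emulating the binary version (1.31 here).
	fg := featuregate.NewVersionedFeatureGate(version.MajorMinor(1, 31))

	if err := fg.AddVersioned(map[featuregate.Feature]featuregate.VersionedSpecs{
		MyFeature: {
			{Version: version.MajorMinor(1, 28), Default: false, PreRelease: featuregate.Alpha},
			{Version: version.MajorMinor(1, 30), Default: true, PreRelease: featuregate.Beta},
		},
	}); err != nil {
		return nil, err
	}

	// Behave as a 1.29 binary: MyFeature falls back to its 1.28 spec,
	// so Enabled(MyFeature) reports false unless set explicitly.
	if err := fg.SetEmulationVersion(version.MajorMinor(1, 29)); err != nil {
		return nil, err
	}
	return fg, nil
}
```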
+func (f *featureGate) ResetFeatureValueToDefault(name Feature) error { + f.lock.Lock() + defer f.lock.Unlock() + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } + _, inEnabled := enabled[name] + if inEnabled { + delete(enabled, name) + } + _, inEnabledRaw := enabledRaw[string(name)] + if inEnabledRaw { + delete(enabledRaw, string(name)) + } + // some features could be in enabled map but not enabledRaw map, + // for example some Alpha feature when AllAlpha is set. + if inEnabledRaw && !inEnabled { + return fmt.Errorf("feature:%s was explicitly set, but not in enabled map", name) + } + f.enabled.Store(enabled) + f.enabledRaw.Store(enabledRaw) + return nil +} diff --git a/vendor/k8s.io/component-base/logs/api/v1/options.go b/vendor/k8s.io/component-base/logs/api/v1/options.go index 4abcc1de81..754443bf6f 100644 --- a/vendor/k8s.io/component-base/logs/api/v1/options.go +++ b/vendor/k8s.io/component-base/logs/api/v1/options.go @@ -282,6 +282,7 @@ func apply(c *LoggingConfiguration, options *LoggingOptions, featureGate feature if err := loggingFlags.Lookup("vmodule").Value.Set(VModuleConfigurationPflag(&c.VModule).String()); err != nil { return fmt.Errorf("internal error while setting klog vmodule: %v", err) } + setSlogDefaultLogger() klog.StartFlushDaemon(c.FlushFrequency.Duration.Duration) klog.EnableContextualLogging(p.ContextualLoggingEnabled) return nil diff --git a/vendor/k8s.io/component-base/logs/api/v1/options_no_slog.go b/vendor/k8s.io/component-base/logs/api/v1/options_no_slog.go new file mode 100644 index 0000000000..816948e0a6 --- /dev/null +++ b/vendor/k8s.io/component-base/logs/api/v1/options_no_slog.go @@ -0,0 +1,24 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +func setSlogDefaultLogger() { + // Do nothing when build with Go < 1.21. +} diff --git a/vendor/k8s.io/component-base/logs/api/v1/options_slog.go b/vendor/k8s.io/component-base/logs/api/v1/options_slog.go new file mode 100644 index 0000000000..31765e644d --- /dev/null +++ b/vendor/k8s.io/component-base/logs/api/v1/options_slog.go @@ -0,0 +1,37 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "log/slog" + + "github.com/go-logr/logr" + "k8s.io/klog/v2" +) + +// setSlogDefaultLogger sets the global slog default logger to the same default +// that klog currently uses. +func setSlogDefaultLogger() { + // klog.Background() always returns a valid logr.Logger, regardless of + // how logging was configured. We just need to turn it into a + // slog.Handler. SetDefault then needs a slog.Logger. + handler := logr.ToSlogHandler(klog.Background()) + slog.SetDefault(slog.New(handler)) +} diff --git a/vendor/k8s.io/component-base/metrics/testutil/metrics.go b/vendor/k8s.io/component-base/metrics/testutil/metrics.go index df3f8ee0c1..c595f55d64 100644 --- a/vendor/k8s.io/component-base/metrics/testutil/metrics.go +++ b/vendor/k8s.io/component-base/metrics/testutil/metrics.go @@ -70,7 +70,7 @@ func NewMetrics() Metrics { // ParseMetrics parses Metrics from data returned from prometheus endpoint func ParseMetrics(data string, output *Metrics) error { - dec := expfmt.NewDecoder(strings.NewReader(data), expfmt.FmtText) + dec := expfmt.NewDecoder(strings.NewReader(data), expfmt.NewFormat(expfmt.TypeTextPlain)) decoder := expfmt.SampleDecoder{ Dec: dec, Opts: &expfmt.DecodeOptions{}, diff --git a/vendor/k8s.io/component-base/tracing/utils.go b/vendor/k8s.io/component-base/tracing/utils.go index b5141f0338..dde7a5b28d 100644 --- a/vendor/k8s.io/component-base/tracing/utils.go +++ b/vendor/k8s.io/component-base/tracing/utils.go @@ -27,6 +27,7 @@ import ( sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" oteltrace "go.opentelemetry.io/otel/trace" + noopoteltrace "go.opentelemetry.io/otel/trace/noop" "k8s.io/client-go/transport" "k8s.io/component-base/tracing/api/v1" @@ -47,7 +48,7 @@ func (n *noopTracerProvider) Shutdown(context.Context) error { } func NewNoopTracerProvider() TracerProvider { - return &noopTracerProvider{TracerProvider: oteltrace.NewNoopTracerProvider()} + return &noopTracerProvider{TracerProvider: noopoteltrace.NewTracerProvider()} } // NewProvider creates a TracerProvider in a component, and enforces recommended tracing behavior diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go index b753b7d191..6015467824 100644 --- a/vendor/k8s.io/component-base/version/base.go +++ b/vendor/k8s.io/component-base/version/base.go @@ -61,3 +61,10 @@ var ( buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') ) + +const ( + // DefaultKubeBinaryVersion is the hard coded k8 binary version based on the latest K8s release. + // It is supposed to be consistent with gitMajor and gitMinor, except for local tests, where gitMajor and gitMinor are "". + // Should update for each minor release! + DefaultKubeBinaryVersion = "1.31" +) diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 026be9e3b1..47ec9466a6 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -404,13 +404,6 @@ func (t *traceLocation) Set(value string) error { return nil } -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - var logging loggingT var commandLine flag.FlagSet @@ -486,7 +479,7 @@ type settings struct { // Access to all of the following fields must be protected via a mutex. // file holds writer for each of the log types. 
- file [severity.NumSeverity]flushSyncWriter + file [severity.NumSeverity]io.Writer // flushInterval is the interval for periodic flushing. If zero, // the global default will be used. flushInterval time.Duration @@ -831,32 +824,12 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, buffer.PutBuffer(b) } -// redirectBuffer is used to set an alternate destination for the logs -type redirectBuffer struct { - w io.Writer -} - -func (rb *redirectBuffer) Sync() error { - return nil -} - -func (rb *redirectBuffer) Flush() error { - return nil -} - -func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { - return rb.w.Write(bytes) -} - // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() for s := severity.FatalLog; s >= severity.InfoLog; s-- { - rb := &redirectBuffer{ - w: w, - } - logging.file[s] = rb + logging.file[s] = w } } @@ -868,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) { if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) } - rb := &redirectBuffer{ - w: w, - } - logging.file[sev] = rb + logging.file[sev] = w } // LogToStderr sets whether to log exclusively to stderr, bypassing outputs @@ -1011,7 +981,8 @@ func (l *loggingT) exit(err error) { logExitFunc(err) return } - l.flushAll() + needToSync := l.flushAll() + l.syncAll(needToSync) OsExit(2) } @@ -1028,10 +999,6 @@ type syncBuffer struct { maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - // CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. func CalculateMaxSize() uint64 { if logging.logFile != "" { @@ -1223,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) { // lockAndFlushAll is like flushAll but locks l.mu first. func (l *loggingT) lockAndFlushAll() { l.mu.Lock() - l.flushAll() + needToSync := l.flushAll() l.mu.Unlock() + // Some environments are slow when syncing and holding the lock might cause contention. + l.syncAll(needToSync) } -// flushAll flushes all the logs and attempts to "sync" their data to disk. +// flushAll flushes all the logs // l.mu is held. -func (l *loggingT) flushAll() { +// +// The result is the number of files which need to be synced and the pointers to them. +func (l *loggingT) flushAll() fileArray { + var needToSync fileArray + // Flush from fatal down, in case there's trouble flushing. for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] - if file != nil { - _ = file.Flush() // ignore error - _ = file.Sync() // ignore error + if sb, ok := file.(*syncBuffer); ok && sb.file != nil { + _ = sb.Flush() // ignore error + needToSync.files[needToSync.num] = sb.file + needToSync.num++ } } if logging.loggerOptions.flush != nil { logging.loggerOptions.flush() } + return needToSync +} + +type fileArray struct { + num int + files [severity.NumSeverity]*os.File +} + +// syncAll attempts to "sync" their data to disk. +func (l *loggingT) syncAll(needToSync fileArray) { + // Flush from fatal down, in case there's trouble flushing. 
+ for i := 0; i < needToSync.num; i++ { + _ = needToSync.files[i].Sync() // ignore error + } } // CopyStandardLogTo arranges for messages written to the Go "log" package's diff --git a/vendor/k8s.io/klog/v2/textlogger/textlogger.go b/vendor/k8s.io/klog/v2/textlogger/textlogger.go index fb240c53ca..0b55a29942 100644 --- a/vendor/k8s.io/klog/v2/textlogger/textlogger.go +++ b/vendor/k8s.io/klog/v2/textlogger/textlogger.go @@ -1,6 +1,6 @@ /* Copyright 2019 The Kubernetes Authors. -Copyright 2020 Intel Coporation. +Copyright 2020 Intel Corporation. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go index b9993f4ca3..dd286e1f21 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go @@ -20,6 +20,7 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=apiregistration.k8s.io // +k8s:defaulter-gen=TypeMeta +// +k8s:prerelease-lifecycle-gen=true // Package v1 contains the API Registration API, which is responsible for // registering an API `Group`/`Version` with another kubernetes like API server. diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto index 8413a158b2..5571387ef8 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto @@ -34,7 +34,7 @@ message APIService { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec contains information for locating and communicating with a server optional APIServiceSpec spec = 2; @@ -54,7 +54,7 @@ message APIServiceCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // Unique, one-word, CamelCase reason for the condition's last transition. // +optional @@ -70,7 +70,7 @@ message APIServiceList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of APIService repeated APIService items = 2; diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go index 14f71c7044..fe5f64c0e1 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go @@ -19,6 +19,7 @@ package v1 import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.10 // APIServiceList is a list of APIService objects. 
type APIServiceList struct { @@ -145,6 +146,7 @@ type APIServiceStatus struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.10 // APIService represents a server for a particular GroupVersion. // Name must be "version.group". diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..14d3e1f48d --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIService) APILifecycleIntroduced() (major, minor int) { + return 1, 10 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIServiceList) APILifecycleIntroduced() (major, minor int) { + return 1, 10 +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto index dca9b1ed63..938039f4d8 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto @@ -34,7 +34,7 @@ message APIService { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec contains information for locating and communicating with a server optional APIServiceSpec spec = 2; @@ -54,7 +54,7 @@ message APIServiceCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // Unique, one-word, CamelCase reason for the condition's last transition. 
// +optional @@ -70,7 +70,7 @@ message APIServiceList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of APIService repeated APIService items = 2; diff --git a/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go b/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go index ff7421726d..d425ae7add 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net/http" + "sync" "time" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -37,7 +38,8 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/kubernetes" "k8s.io/client-go/transport" - "k8s.io/component-base/version" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/component-base/tracing" v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" v1helper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper" "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" @@ -49,11 +51,16 @@ import ( openapiaggregator "k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator" openapiv3controller "k8s.io/kube-aggregator/pkg/controllers/openapiv3" openapiv3aggregator "k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator" - statuscontrollers "k8s.io/kube-aggregator/pkg/controllers/status" + localavailability "k8s.io/kube-aggregator/pkg/controllers/status/local" + availabilitymetrics "k8s.io/kube-aggregator/pkg/controllers/status/metrics" + remoteavailability "k8s.io/kube-aggregator/pkg/controllers/status/remote" apiservicerest "k8s.io/kube-aggregator/pkg/registry/apiservice/rest" openapicommon "k8s.io/kube-openapi/pkg/common" ) +// making sure we only register metrics once into legacy registry +var registerIntoLegacyRegistryOnce sync.Once + func init() { // we need to add the options (like ListOptions) to empty v1 metav1.AddToGroupVersion(aggregatorscheme.Scheme, schema.GroupVersion{Group: "", Version: "v1"}) @@ -77,11 +84,6 @@ const ( // ExtraConfig represents APIServices-specific configuration type ExtraConfig struct { - // PeerCAFile is the ca bundle used by this kube-apiserver to verify peer apiservers' - // serving certs when routing a request to the peer in the case the request can not be served - // locally due to version skew. - PeerCAFile string - // PeerAdvertiseAddress is the IP for this kube-apiserver which is used by peer apiservers to route a request // to this apiserver. This happens in cases where the peer is not able to serve the request due to // version skew. If unset, AdvertiseAddress/BindAddress will be used. @@ -100,6 +102,12 @@ type ExtraConfig struct { ServiceResolver ServiceResolver RejectForwardingRedirects bool + + // DisableRemoteAvailableConditionController disables the controller that updates the Available conditions for + // remote APIServices via querying endpoints of the referenced services. In generic controlplane use-cases, + // the concept of services and endpoints might differ, and might require another implementation of this + // controller. Local APIService are reconciled nevertheless. + DisableRemoteAvailableConditionController bool } // Config represents the configuration needed to create an APIAggregator. 
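Aside (not taken from the vendored sources): the new DisableRemoteAvailableConditionController field above is just a configuration knob; the sketch below shows, under stated assumptions, how an embedding control plane might set it. The helper name buildAggregator is hypothetical, and the assumption that Config.GenericConfig is a *genericapiserver.RecommendedConfig prepared elsewhere is mine, not confirmed by this diff.

package embedding // illustrative sketch only; not vendored code

import (
	genericapiserver "k8s.io/apiserver/pkg/server"
	aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver"
)

// buildAggregator wires an APIAggregator that keeps local APIService reconciliation
// but opts out of the Services/Endpoints-based remote availability checks.
func buildAggregator(genericConfig *genericapiserver.RecommendedConfig, delegate genericapiserver.DelegationTarget) (*aggregatorapiserver.APIAggregator, error) {
	cfg := aggregatorapiserver.Config{
		GenericConfig: genericConfig, // assumed to be prepared by the embedding server
		ExtraConfig: aggregatorapiserver.ExtraConfig{
			// Skip only the remote availability controller; local APIServices
			// are still reconciled, as described in the field's doc comment.
			DisableRemoteAvailableConditionController: true,
		},
	}
	return cfg.Complete().NewWithDelegate(delegate)
}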
@@ -120,7 +128,7 @@ type CompletedConfig struct { } type runnable interface { - Run(stopCh <-chan struct{}) error + RunWithContext(ctx context.Context) error } // preparedGenericAPIServer is a private wrapper that enforces a call of PrepareRun() before Run can be invoked. @@ -174,6 +182,9 @@ type APIAggregator struct { // rejectForwardingRedirects is whether to allow to forward redirect response rejectForwardingRedirects bool + + // tracerProvider is used to wrap the proxy transport and handler with tracing + tracerProvider tracing.TracerProvider } // Complete fills in any fields not set that are required to have valid data. It's mutating the receiver. @@ -186,8 +197,6 @@ func (cfg *Config) Complete() CompletedConfig { // the kube aggregator wires its own discovery mechanism // TODO eventually collapse this by extracting all of the discovery out c.GenericConfig.EnableDiscovery = false - version := version.Get() - c.GenericConfig.Version = &version return CompletedConfig{&c} } @@ -244,10 +253,11 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg openAPIV3Config: c.GenericConfig.OpenAPIV3Config, proxyCurrentCertKeyContent: func() (bytes []byte, bytes2 []byte) { return nil, nil }, rejectForwardingRedirects: c.ExtraConfig.RejectForwardingRedirects, + tracerProvider: c.GenericConfig.TracerProvider, } // used later to filter the served resource by those that have expired. - resourceExpirationEvaluator, err := genericapiserver.NewResourceExpirationEvaluator(*c.GenericConfig.Version) + resourceExpirationEvaluator, err := genericapiserver.NewResourceExpirationEvaluator(s.GenericAPIServer.EffectiveVersion.EmulationVersion()) if err != nil { return nil, err } @@ -287,61 +297,79 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg } // We are passing the context to ProxyCerts.RunOnce as it needs to implement RunOnce(ctx) however the // context is not used at all. So passing a empty context shouldn't be a problem - ctx := context.TODO() - if err := aggregatorProxyCerts.RunOnce(ctx); err != nil { + if err := aggregatorProxyCerts.RunOnce(context.Background()); err != nil { return nil, err } aggregatorProxyCerts.AddListener(apiserviceRegistrationController) s.proxyCurrentCertKeyContent = aggregatorProxyCerts.CurrentCertKeyContent s.GenericAPIServer.AddPostStartHookOrDie("aggregator-reload-proxy-client-cert", func(postStartHookContext genericapiserver.PostStartHookContext) error { - // generate a context from stopCh. 
This is to avoid modifying files which are relying on apiserver - // TODO: See if we can pass ctx to the current method - ctx, cancel := context.WithCancel(context.Background()) - go func() { - select { - case <-postStartHookContext.StopCh: - cancel() // stopCh closed, so cancel our context - case <-ctx.Done(): - } - }() - go aggregatorProxyCerts.Run(ctx, 1) + go aggregatorProxyCerts.Run(postStartHookContext, 1) return nil }) } - availableController, err := statuscontrollers.NewAvailableConditionController( + s.GenericAPIServer.AddPostStartHookOrDie("start-kube-aggregator-informers", func(context genericapiserver.PostStartHookContext) error { + informerFactory.Start(context.Done()) + c.GenericConfig.SharedInformerFactory.Start(context.Done()) + return nil + }) + + // create shared (remote and local) availability metrics + // TODO: decouple from legacyregistry + metrics := availabilitymetrics.New() + registerIntoLegacyRegistryOnce.Do(func() { err = metrics.Register(legacyregistry.Register, legacyregistry.CustomRegister) }) + if err != nil { + return nil, err + } + + // always run local availability controller + local, err := localavailability.New( informerFactory.Apiregistration().V1().APIServices(), - c.GenericConfig.SharedInformerFactory.Core().V1().Services(), - c.GenericConfig.SharedInformerFactory.Core().V1().Endpoints(), apiregistrationClient.ApiregistrationV1(), - proxyTransportDial, - (func() ([]byte, []byte))(s.proxyCurrentCertKeyContent), - s.serviceResolver, + metrics, ) if err != nil { return nil, err } - - s.GenericAPIServer.AddPostStartHookOrDie("start-kube-aggregator-informers", func(context genericapiserver.PostStartHookContext) error { - informerFactory.Start(context.StopCh) - c.GenericConfig.SharedInformerFactory.Start(context.StopCh) + s.GenericAPIServer.AddPostStartHookOrDie("apiservice-status-local-available-controller", func(context genericapiserver.PostStartHookContext) error { + // if we end up blocking for long periods of time, we may need to increase workers. + go local.Run(5, context.Done()) return nil }) + + // conditionally run remote availability controller. This could be replaced in certain + // generic controlplane use-cases where there is another concept of services and/or endpoints. + if !c.ExtraConfig.DisableRemoteAvailableConditionController { + remote, err := remoteavailability.New( + informerFactory.Apiregistration().V1().APIServices(), + c.GenericConfig.SharedInformerFactory.Core().V1().Services(), + c.GenericConfig.SharedInformerFactory.Core().V1().Endpoints(), + apiregistrationClient.ApiregistrationV1(), + proxyTransportDial, + (func() ([]byte, []byte))(s.proxyCurrentCertKeyContent), + s.serviceResolver, + metrics, + ) + if err != nil { + return nil, err + } + s.GenericAPIServer.AddPostStartHookOrDie("apiservice-status-remote-available-controller", func(context genericapiserver.PostStartHookContext) error { + // if we end up blocking for long periods of time, we may need to increase workers. 
+ go remote.Run(5, context.Done()) + return nil + }) + } + s.GenericAPIServer.AddPostStartHookOrDie("apiservice-registration-controller", func(context genericapiserver.PostStartHookContext) error { - go apiserviceRegistrationController.Run(context.StopCh, apiServiceRegistrationControllerInitiated) + go apiserviceRegistrationController.Run(context.Done(), apiServiceRegistrationControllerInitiated) select { - case <-context.StopCh: + case <-context.Done(): case <-apiServiceRegistrationControllerInitiated: } return nil }) - s.GenericAPIServer.AddPostStartHookOrDie("apiservice-status-available-controller", func(context genericapiserver.PostStartHookContext) error { - // if we end up blocking for long periods of time, we may need to increase workers. - go availableController.Run(5, context.StopCh) - return nil - }) if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) { s.discoveryAggregationController = NewDiscoveryManager( @@ -355,7 +383,7 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg // Discovery aggregation depends on the apiservice registration controller // having the full list of APIServices already synced select { - case <-context.StopCh: + case <-context.Done(): return nil // Context cancelled, should abort/clean goroutines case <-apiServiceRegistrationControllerInitiated: @@ -366,10 +394,10 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg // When discovery is ready, all APIServices will be present, with APIServices // that have not successfully synced discovery to be present but marked as Stale. discoverySyncedCh := make(chan struct{}) - go s.discoveryAggregationController.Run(context.StopCh, discoverySyncedCh) + go s.discoveryAggregationController.Run(context.Done(), discoverySyncedCh) select { - case <-context.StopCh: + case <-context.Done(): return nil // Context cancelled, should abort/clean goroutines case <-discoverySyncedCh: @@ -401,7 +429,7 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg return false, err } return true, nil - }, hookContext.StopCh); err != nil { + }, hookContext.Done()); err != nil { return fmt.Errorf("failed to wait for apiserver-identity lease %s to be created: %v", s.GenericAPIServer.APIServerID, err) } @@ -417,14 +445,14 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg // to register all built-in resources when the generic apiservers install APIs. s.GenericAPIServer.StorageVersionManager.UpdateStorageVersions(hookContext.LoopbackClientConfig, s.GenericAPIServer.APIServerID) return false, nil - }, hookContext.StopCh) + }, hookContext.Done()) // Once the storage version updater finishes the first round of update, // the PostStartHook will return to unblock /healthz. The handler chain // won't block write requests anymore. Check every second since it's not // expensive. 
wait.PollImmediateUntil(1*time.Second, func() (bool, error) { return s.GenericAPIServer.StorageVersionManager.Completed(), nil - }, hookContext.StopCh) + }, hookContext.Done()) return nil }) } @@ -438,14 +466,14 @@ func (s *APIAggregator) PrepareRun() (preparedAPIAggregator, error) { // add post start hook before generic PrepareRun in order to be before /healthz installation if s.openAPIConfig != nil { s.GenericAPIServer.AddPostStartHookOrDie("apiservice-openapi-controller", func(context genericapiserver.PostStartHookContext) error { - go s.openAPIAggregationController.Run(context.StopCh) + go s.openAPIAggregationController.Run(context.Done()) return nil }) } if s.openAPIV3Config != nil { s.GenericAPIServer.AddPostStartHookOrDie("apiservice-openapiv3-controller", func(context genericapiserver.PostStartHookContext) error { - go s.openAPIV3AggregationController.Run(context.StopCh) + go s.openAPIV3AggregationController.Run(context.Done()) return nil }) } @@ -484,8 +512,8 @@ func (s *APIAggregator) PrepareRun() (preparedAPIAggregator, error) { return preparedAPIAggregator{APIAggregator: s, runnable: prepared}, nil } -func (s preparedAPIAggregator) Run(stopCh <-chan struct{}) error { - return s.runnable.Run(stopCh) +func (s preparedAPIAggregator) Run(ctx context.Context) error { + return s.runnable.RunWithContext(ctx) } // AddAPIService adds an API service. It is not thread-safe, so only call it on one thread at a time please. @@ -523,6 +551,7 @@ func (s *APIAggregator) AddAPIService(apiService *v1.APIService) error { proxyTransportDial: s.proxyTransportDial, serviceResolver: s.serviceResolver, rejectForwardingRedirects: s.rejectForwardingRedirects, + tracerProvider: s.tracerProvider, } proxyHandler.updateAPIService(apiService) if s.openAPIAggregationController != nil { diff --git a/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go b/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go index 52df3cb25f..d70e906f98 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go @@ -51,7 +51,7 @@ type APIServiceRegistrationController struct { // To allow injection for testing. 
syncFn func(key string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } var _ dynamiccertificates.Listener = &APIServiceRegistrationController{} @@ -62,7 +62,10 @@ func NewAPIServiceRegistrationController(apiServiceInformer informers.APIService apiHandlerManager: apiHandlerManager, apiServiceLister: apiServiceInformer.Lister(), apiServiceSynced: apiServiceInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "APIServiceRegistrationController"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "APIServiceRegistrationController"}, + ), } apiServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -143,7 +146,7 @@ func (c *APIServiceRegistrationController) processNextWorkItem() bool { } defer c.queue.Done(key) - err := c.syncFn(key.(string)) + err := c.syncFn(key) if err == nil { c.queue.Forget(key) return true diff --git a/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery.go b/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery.go index c65454e7e5..68199b6b30 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery.go @@ -103,7 +103,7 @@ type discoveryManager struct { // It is important that the reconciler for this queue does not excessively // contact the apiserver if a key was enqueued before the server was last // contacted. - dirtyAPIServiceQueue workqueue.RateLimitingInterface + dirtyAPIServiceQueue workqueue.TypedRateLimitingInterface[string] // Merged handler which stores all known groupversions mergedDiscoveryHandler discoveryendpoint.ResourceManager @@ -197,8 +197,11 @@ func NewDiscoveryManager( mergedDiscoveryHandler: target, apiServices: make(map[string]groupVersionInfo), cachedResults: make(map[serviceKey]cachedResult), - dirtyAPIServiceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "discovery-manager"), - codecs: codecs, + dirtyAPIServiceQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "discovery-manager"}, + ), + codecs: codecs, } } @@ -488,7 +491,7 @@ func (dm *discoveryManager) Run(stopCh <-chan struct{}, discoverySyncedCh chan<- func() { defer dm.dirtyAPIServiceQueue.Done(next) - if err := dm.syncAPIService(next.(string)); err != nil { + if err := dm.syncAPIService(next); err != nil { dm.dirtyAPIServiceQueue.AddRateLimited(next) } else { dm.dirtyAPIServiceQueue.Forget(next) diff --git a/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go b/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go index 8a647a6451..a59974f300 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go @@ -27,10 +27,13 @@ import ( "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" endpointmetrics "k8s.io/apiserver/pkg/endpoints/metrics" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" apiserverproxyutil "k8s.io/apiserver/pkg/util/proxy" "k8s.io/apiserver/pkg/util/x509metrics" "k8s.io/client-go/transport" + 
"k8s.io/component-base/tracing" "k8s.io/klog/v2" apiregistrationv1api "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper" @@ -59,6 +62,9 @@ type proxyHandler struct { // reject to forward redirect response rejectForwardingRedirects bool + + // tracerProvider is used to wrap the proxy transport and handler with tracing + tracerProvider tracing.TracerProvider } type proxyHandlingInfo struct { @@ -155,6 +161,11 @@ func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { proxyRoundTripper = transport.NewAuthProxyRoundTripper(user.GetName(), user.GetGroups(), user.GetExtra(), proxyRoundTripper) + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) && !upgrade { + tracingWrapper := tracing.WrapperFor(r.tracerProvider) + proxyRoundTripper = tracingWrapper(proxyRoundTripper) + } + // If we are upgrading, then the upgrade path tries to use this request with the TLS config we provide, but it does // NOT use the proxyRoundTripper. It's a direct dial that bypasses the proxyRoundTripper. This means that we have to // attach the "correct" user headers to the request ahead of time. diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go index 25bf6ea44b..b248437dc9 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go @@ -20,12 +20,11 @@ package v1 import ( "context" - "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" scheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" ) @@ -40,6 +39,7 @@ type APIServicesGetter interface { type APIServiceInterface interface { Create(ctx context.Context, aPIService *v1.APIService, opts metav1.CreateOptions) (*v1.APIService, error) Update(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (*v1.APIService, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (*v1.APIService, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -52,133 +52,18 @@ type APIServiceInterface interface { // aPIServices implements APIServiceInterface type aPIServices struct { - client rest.Interface + *gentype.ClientWithList[*v1.APIService, *v1.APIServiceList] } // newAPIServices returns a APIServices func newAPIServices(c *ApiregistrationV1Client) *aPIServices { return &aPIServices{ - client: c.RESTClient(), + gentype.NewClientWithList[*v1.APIService, *v1.APIServiceList]( + "apiservices", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.APIService { return &v1.APIService{} }, + func() *v1.APIServiceList { return &v1.APIServiceList{} }), } } - -// Get takes name of the aPIService, and returns the corresponding aPIService object, and an error if there is any. 
-func (c *aPIServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.APIService, err error) { - result = &v1.APIService{} - err = c.client.Get(). - Resource("apiservices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of APIServices that match those selectors. -func (c *aPIServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.APIServiceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.APIServiceList{} - err = c.client.Get(). - Resource("apiservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested aPIServices. -func (c *aPIServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("apiservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a aPIService and creates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *aPIServices) Create(ctx context.Context, aPIService *v1.APIService, opts metav1.CreateOptions) (result *v1.APIService, err error) { - result = &v1.APIService{} - err = c.client.Post(). - Resource("apiservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(aPIService). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a aPIService and updates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *aPIServices) Update(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (result *v1.APIService, err error) { - result = &v1.APIService{} - err = c.client.Put(). - Resource("apiservices"). - Name(aPIService.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(aPIService). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *aPIServices) UpdateStatus(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (result *v1.APIService, err error) { - result = &v1.APIService{} - err = c.client.Put(). - Resource("apiservices"). - Name(aPIService.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(aPIService). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the aPIService and deletes it. Returns an error if one occurs. -func (c *aPIServices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("apiservices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *aPIServices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("apiservices"). 
- VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched aPIService. -func (c *aPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIService, err error) { - result = &v1.APIService{} - err = c.client.Patch(pt). - Resource("apiservices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go index a5e76412e1..9e5f982bf6 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go @@ -20,12 +20,11 @@ package v1beta1 import ( "context" - "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" v1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" scheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" ) @@ -40,6 +39,7 @@ type APIServicesGetter interface { type APIServiceInterface interface { Create(ctx context.Context, aPIService *v1beta1.APIService, opts v1.CreateOptions) (*v1beta1.APIService, error) Update(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (*v1beta1.APIService, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (*v1beta1.APIService, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -52,133 +52,18 @@ type APIServiceInterface interface { // aPIServices implements APIServiceInterface type aPIServices struct { - client rest.Interface + *gentype.ClientWithList[*v1beta1.APIService, *v1beta1.APIServiceList] } // newAPIServices returns a APIServices func newAPIServices(c *ApiregistrationV1beta1Client) *aPIServices { return &aPIServices{ - client: c.RESTClient(), + gentype.NewClientWithList[*v1beta1.APIService, *v1beta1.APIServiceList]( + "apiservices", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.APIService { return &v1beta1.APIService{} }, + func() *v1beta1.APIServiceList { return &v1beta1.APIServiceList{} }), } } - -// Get takes name of the aPIService, and returns the corresponding aPIService object, and an error if there is any. -func (c *aPIServices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.APIService, err error) { - result = &v1beta1.APIService{} - err = c.client.Get(). - Resource("apiservices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of APIServices that match those selectors. 
-func (c *aPIServices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.APIServiceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.APIServiceList{} - err = c.client.Get(). - Resource("apiservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested aPIServices. -func (c *aPIServices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("apiservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a aPIService and creates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *aPIServices) Create(ctx context.Context, aPIService *v1beta1.APIService, opts v1.CreateOptions) (result *v1beta1.APIService, err error) { - result = &v1beta1.APIService{} - err = c.client.Post(). - Resource("apiservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(aPIService). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a aPIService and updates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *aPIServices) Update(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (result *v1beta1.APIService, err error) { - result = &v1beta1.APIService{} - err = c.client.Put(). - Resource("apiservices"). - Name(aPIService.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(aPIService). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *aPIServices) UpdateStatus(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (result *v1beta1.APIService, err error) { - result = &v1beta1.APIService{} - err = c.client.Put(). - Resource("apiservices"). - Name(aPIService.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(aPIService). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the aPIService and deletes it. Returns an error if one occurs. -func (c *aPIServices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("apiservices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *aPIServices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("apiservices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched aPIService. 
-func (c *aPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.APIService, err error) { - result = &v1beta1.APIService{} - err = c.client.Patch(pt). - Resource("apiservices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go b/vendor/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go index 3cf42a74bd..9774cfb885 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go @@ -228,6 +228,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. At that point no new diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1/apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1/apiservice.go index 5af77c7f76..ccac6e2f23 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1/apiservice.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1/apiservice.go @@ -19,8 +19,8 @@ limitations under the License. package v1 import ( - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" ) @@ -39,30 +39,10 @@ type APIServiceLister interface { // aPIServiceLister implements the APIServiceLister interface. type aPIServiceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.APIService] } // NewAPIServiceLister returns a new APIServiceLister. func NewAPIServiceLister(indexer cache.Indexer) APIServiceLister { - return &aPIServiceLister{indexer: indexer} -} - -// List lists all APIServices in the indexer. -func (s *aPIServiceLister) List(selector labels.Selector) (ret []*v1.APIService, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.APIService)) - }) - return ret, err -} - -// Get retrieves the APIService from the index for a given name. -func (s *aPIServiceLister) Get(name string) (*v1.APIService, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("apiservice"), name) - } - return obj.(*v1.APIService), nil + return &aPIServiceLister{listers.New[*v1.APIService](indexer, v1.Resource("apiservice"))} } diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/apiservice.go index 8628b80d03..b15372613b 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/apiservice.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/apiservice.go @@ -19,8 +19,8 @@ limitations under the License. 
package v1beta1 import ( - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" v1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" ) @@ -39,30 +39,10 @@ type APIServiceLister interface { // aPIServiceLister implements the APIServiceLister interface. type aPIServiceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.APIService] } // NewAPIServiceLister returns a new APIServiceLister. func NewAPIServiceLister(indexer cache.Indexer) APIServiceLister { - return &aPIServiceLister{indexer: indexer} -} - -// List lists all APIServices in the indexer. -func (s *aPIServiceLister) List(selector labels.Selector) (ret []*v1beta1.APIService, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.APIService)) - }) - return ret, err -} - -// Get retrieves the APIService from the index for a given name. -func (s *aPIServiceLister) Get(name string) (*v1beta1.APIService, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("apiservice"), name) - } - return obj.(*v1beta1.APIService), nil + return &aPIServiceLister{listers.New[*v1beta1.APIService](indexer, v1beta1.Resource("apiservice"))} } diff --git a/vendor/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go b/vendor/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go index 69f32f4aa8..3643df8003 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go +++ b/vendor/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go @@ -47,7 +47,7 @@ const ( // them if necessary. type AggregationController struct { openAPIAggregationManager aggregator.SpecAggregator - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] downloader *aggregator.Downloader // To allow injection for testing. 
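Aside (not taken from the vendored sources): the controller hunks above and below migrate from the untyped workqueue.RateLimitingInterface to the generic TypedRateLimitingInterface[string], which is why the key.(string) assertions disappear. A minimal standalone sketch of that pattern follows; the package and function names are hypothetical.

package example // illustrative sketch only; not vendored code

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// newQueue builds a string-keyed, rate-limited work queue the same way the
// migrated controllers in this diff do.
func newQueue() workqueue.TypedRateLimitingInterface[string] {
	return workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[string](),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "example_controller"},
	)
}

// processOne drains a single key from the queue.
func processOne(queue workqueue.TypedRateLimitingInterface[string]) bool {
	key, quit := queue.Get() // key is already a string; no type assertion needed
	if quit {
		return false
	}
	defer queue.Done(key)
	fmt.Println("processing", key)
	queue.Forget(key)
	return true
}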
@@ -58,9 +58,9 @@ type AggregationController struct { func NewAggregationController(downloader *aggregator.Downloader, openAPIAggregationManager aggregator.SpecAggregator) *AggregationController { c := &AggregationController{ openAPIAggregationManager: openAPIAggregationManager, - queue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemExponentialFailureRateLimiter(successfulUpdateDelay, failedUpdateMaxExpDelay), - "open_api_aggregation_controller", + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](successfulUpdateDelay, failedUpdateMaxExpDelay), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "open_api_aggregation_controller"}, ), downloader: downloader, } @@ -97,7 +97,7 @@ func (c *AggregationController) processNextWorkItem() bool { } klog.V(4).Infof("OpenAPI AggregationController: Processing item %s", key) - action, err := c.syncHandler(key.(string)) + action, err := c.syncHandler(key) if err != nil { utilruntime.HandleError(fmt.Errorf("loading OpenAPI spec for %q failed with: %v", key, err)) } diff --git a/vendor/k8s.io/kube-aggregator/pkg/controllers/openapiv3/controller.go b/vendor/k8s.io/kube-aggregator/pkg/controllers/openapiv3/controller.go index 0f5e37c452..d91222824c 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/controllers/openapiv3/controller.go +++ b/vendor/k8s.io/kube-aggregator/pkg/controllers/openapiv3/controller.go @@ -46,7 +46,7 @@ const ( // AggregationController periodically checks the list of group-versions handled by each APIService and updates the discovery page periodically type AggregationController struct { openAPIAggregationManager aggregator.SpecProxier - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // To allow injection for testing. 
syncHandler func(key string) (syncAction, error) @@ -56,9 +56,9 @@ type AggregationController struct { func NewAggregationController(openAPIAggregationManager aggregator.SpecProxier) *AggregationController { c := &AggregationController{ openAPIAggregationManager: openAPIAggregationManager, - queue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemExponentialFailureRateLimiter(successfulUpdateDelay, failedUpdateMaxExpDelay), - "open_api_v3_aggregation_controller", + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](successfulUpdateDelay, failedUpdateMaxExpDelay), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "open_api_v3_aggregation_controller"}, ), } @@ -98,7 +98,7 @@ func (c *AggregationController) processNextWorkItem() bool { return false } - if aggregator.IsLocalAPIService(key.(string)) { + if aggregator.IsLocalAPIService(key) { // for local delegation targets that are aggregated once per second, log at // higher level to avoid flooding the log klog.V(6).Infof("OpenAPI AggregationController: Processing item %s", key) @@ -106,7 +106,7 @@ func (c *AggregationController) processNextWorkItem() bool { klog.V(4).Infof("OpenAPI AggregationController: Processing item %s", key) } - action, err := c.syncHandler(key.(string)) + action, err := c.syncHandler(key) if err == nil { c.queue.Forget(key) } else { @@ -115,7 +115,7 @@ func (c *AggregationController) processNextWorkItem() bool { switch action { case syncRequeue: - if aggregator.IsLocalAPIService(key.(string)) { + if aggregator.IsLocalAPIService(key) { klog.V(7).Infof("OpenAPI AggregationController: action for local item %s: Requeue after %s.", key, successfulUpdateDelayLocal) c.queue.AddAfter(key, successfulUpdateDelayLocal) } else { diff --git a/vendor/k8s.io/kube-aggregator/pkg/controllers/status/local/local_available_controller.go b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/local/local_available_controller.go new file mode 100644 index 0000000000..982ff69feb --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/local/local_available_controller.go @@ -0,0 +1,227 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package external + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper" + apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" + informers "k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1" + listers "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1" + "k8s.io/kube-aggregator/pkg/controllers" + availabilitymetrics "k8s.io/kube-aggregator/pkg/controllers/status/metrics" +) + +// AvailableConditionController handles checking the availability of registered local API services. +type AvailableConditionController struct { + apiServiceClient apiregistrationclient.APIServicesGetter + + apiServiceLister listers.APIServiceLister + apiServiceSynced cache.InformerSynced + + // To allow injection for testing. + syncFn func(key string) error + + queue workqueue.TypedRateLimitingInterface[string] + + // metrics registered into legacy registry + metrics *availabilitymetrics.Metrics +} + +// New returns a new local availability AvailableConditionController. +func New( + apiServiceInformer informers.APIServiceInformer, + apiServiceClient apiregistrationclient.APIServicesGetter, + metrics *availabilitymetrics.Metrics, +) (*AvailableConditionController, error) { + c := &AvailableConditionController{ + apiServiceClient: apiServiceClient, + apiServiceLister: apiServiceInformer.Lister(), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + // We want a fairly tight requeue time. The controller listens to the API, but because it relies on the routability of the + // service network, it is possible for an external, non-watchable factor to affect availability. This keeps + // the maximum disruption time to a minimum, but it does prevent hot loops. + workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 30*time.Second), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "LocalAvailabilityController"}, + ), + metrics: metrics, + } + + // resync on this one because it is low cardinality and rechecking the actual discovery + // allows us to detect health in a more timely fashion when network connectivity to + // nodes is snipped, but the network still attempts to route there. 
See + // https://github.com/openshift/origin/issues/17159#issuecomment-341798063 + apiServiceHandler, _ := apiServiceInformer.Informer().AddEventHandlerWithResyncPeriod( + cache.ResourceEventHandlerFuncs{ + AddFunc: c.addAPIService, + UpdateFunc: c.updateAPIService, + DeleteFunc: c.deleteAPIService, + }, + 30*time.Second) + c.apiServiceSynced = apiServiceHandler.HasSynced + + c.syncFn = c.sync + + return c, nil +} + +func (c *AvailableConditionController) sync(key string) error { + originalAPIService, err := c.apiServiceLister.Get(key) + if apierrors.IsNotFound(err) { + c.metrics.ForgetAPIService(key) + return nil + } + if err != nil { + return err + } + + if originalAPIService.Spec.Service != nil { + // this controller only handles local APIServices + return nil + } + + // local API services are always considered available + apiService := originalAPIService.DeepCopy() + apiregistrationv1apihelper.SetAPIServiceCondition(apiService, apiregistrationv1apihelper.NewLocalAvailableAPIServiceCondition()) + _, err = c.updateAPIServiceStatus(originalAPIService, apiService) + return err +} + +// updateAPIServiceStatus only issues an update if a change is detected. We have a tight resync loop to quickly detect dead +// apiservices. Doing that means we don't want to quickly issue no-op updates. +func (c *AvailableConditionController) updateAPIServiceStatus(originalAPIService, newAPIService *apiregistrationv1.APIService) (*apiregistrationv1.APIService, error) { + // update this metric on every sync operation to reflect the actual state + c.metrics.SetUnavailableGauge(newAPIService) + + if equality.Semantic.DeepEqual(originalAPIService.Status, newAPIService.Status) { + return newAPIService, nil + } + + orig := apiregistrationv1apihelper.GetAPIServiceConditionByType(originalAPIService, apiregistrationv1.Available) + now := apiregistrationv1apihelper.GetAPIServiceConditionByType(newAPIService, apiregistrationv1.Available) + unknown := apiregistrationv1.APIServiceCondition{ + Type: apiregistrationv1.Available, + Status: apiregistrationv1.ConditionUnknown, + } + if orig == nil { + orig = &unknown + } + if now == nil { + now = &unknown + } + if *orig != *now { + klog.V(2).InfoS("changing APIService availability", "name", newAPIService.Name, "oldStatus", orig.Status, "newStatus", now.Status, "message", now.Message, "reason", now.Reason) + } + + newAPIService, err := c.apiServiceClient.APIServices().UpdateStatus(context.TODO(), newAPIService, metav1.UpdateOptions{}) + if err != nil { + return nil, err + } + + c.metrics.SetUnavailableCounter(originalAPIService, newAPIService) + return newAPIService, nil +} + +// Run starts the AvailableConditionController loop which manages the availability condition of API services. +func (c *AvailableConditionController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Info("Starting LocalAvailability controller") + defer klog.Info("Shutting down LocalAvailability controller") + + // This waits not just for the informers to sync, but for our handlers + // to be called; since the handlers are three different ways of + // enqueueing the same thing, waiting for this permits the queue to + // maximally de-duplicate the entries. 
+ if !controllers.WaitForCacheSync("LocalAvailability", stopCh, c.apiServiceSynced) { + return + } + + for i := 0; i < workers; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + <-stopCh +} + +func (c *AvailableConditionController) runWorker() { + for c.processNextWorkItem() { + } +} + +// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit. +func (c *AvailableConditionController) processNextWorkItem() bool { + key, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(key) + + err := c.syncFn(key) + if err == nil { + c.queue.Forget(key) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %w", key, err)) + c.queue.AddRateLimited(key) + + return true +} + +func (c *AvailableConditionController) addAPIService(obj interface{}) { + castObj := obj.(*apiregistrationv1.APIService) + klog.V(4).Infof("Adding %s", castObj.Name) + c.queue.Add(castObj.Name) +} + +func (c *AvailableConditionController) updateAPIService(oldObj, _ interface{}) { + oldCastObj := oldObj.(*apiregistrationv1.APIService) + klog.V(4).Infof("Updating %s", oldCastObj.Name) + c.queue.Add(oldCastObj.Name) +} + +func (c *AvailableConditionController) deleteAPIService(obj interface{}) { + castObj, ok := obj.(*apiregistrationv1.APIService) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + klog.Errorf("Couldn't get object from tombstone %#v", obj) + return + } + castObj, ok = tombstone.Obj.(*apiregistrationv1.APIService) + if !ok { + klog.Errorf("Tombstone contained object that is not expected %#v", obj) + return + } + } + klog.V(4).Infof("Deleting %q", castObj.Name) + c.queue.Add(castObj.Name) +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/metrics/metrics.go similarity index 72% rename from vendor/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go rename to vendor/k8s.io/kube-aggregator/pkg/controllers/status/metrics/metrics.go index b0653f5988..ebde367597 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go +++ b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/metrics/metrics.go @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package apiserver +package metrics import ( "sync" "k8s.io/component-base/metrics" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper" ) /* @@ -41,14 +43,14 @@ var ( ) ) -type availabilityMetrics struct { +type Metrics struct { unavailableCounter *metrics.CounterVec *availabilityCollector } -func newAvailabilityMetrics() *availabilityMetrics { - return &availabilityMetrics{ +func New() *Metrics { + return &Metrics{ unavailableCounter: metrics.NewCounterVec( &metrics.CounterOpts{ Name: "aggregator_unavailable_apiservice_total", @@ -62,7 +64,7 @@ func newAvailabilityMetrics() *availabilityMetrics { } // Register registers apiservice availability metrics. -func (m *availabilityMetrics) Register( +func (m *Metrics) Register( registrationFunc func(metrics.Registerable) error, customRegistrationFunc func(metrics.StableCollector) error, ) error { @@ -80,7 +82,7 @@ func (m *availabilityMetrics) Register( } // UnavailableCounter returns a counter to track apiservices marked as unavailable. 
-func (m *availabilityMetrics) UnavailableCounter(apiServiceName, reason string) metrics.CounterMetric { +func (m *Metrics) UnavailableCounter(apiServiceName, reason string) metrics.CounterMetric { return m.unavailableCounter.WithLabelValues(apiServiceName, reason) } @@ -91,6 +93,31 @@ type availabilityCollector struct { availabilities map[string]bool } +// SetUnavailableGauge set the metrics so that it reflect the current state base on availability of the given service +func (m *Metrics) SetUnavailableGauge(newAPIService *apiregistrationv1.APIService) { + if apiregistrationv1apihelper.IsAPIServiceConditionTrue(newAPIService, apiregistrationv1.Available) { + m.SetAPIServiceAvailable(newAPIService.Name) + return + } + + m.SetAPIServiceUnavailable(newAPIService.Name) +} + +// SetUnavailableCounter increases the metrics only if the given service is unavailable and its APIServiceCondition has changed +func (m *Metrics) SetUnavailableCounter(originalAPIService, newAPIService *apiregistrationv1.APIService) { + wasAvailable := apiregistrationv1apihelper.IsAPIServiceConditionTrue(originalAPIService, apiregistrationv1.Available) + isAvailable := apiregistrationv1apihelper.IsAPIServiceConditionTrue(newAPIService, apiregistrationv1.Available) + statusChanged := isAvailable != wasAvailable + + if statusChanged && !isAvailable { + reason := "UnknownReason" + if newCondition := apiregistrationv1apihelper.GetAPIServiceConditionByType(newAPIService, apiregistrationv1.Available); newCondition != nil { + reason = newCondition.Reason + } + m.UnavailableCounter(newAPIService.Name, reason).Inc() + } +} + // Check if apiServiceStatusCollector implements necessary interface. var _ metrics.StableCollector = &availabilityCollector{} diff --git a/vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/remote/remote_available_controller.go similarity index 87% rename from vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go rename to vendor/k8s.io/kube-aggregator/pkg/controllers/status/remote/remote_available_controller.go index 6277a81a4a..ade744708c 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go +++ b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/remote/remote_available_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package apiserver +package remote import ( "context" @@ -39,7 +39,6 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/transport" "k8s.io/client-go/util/workqueue" - "k8s.io/component-base/metrics/legacyregistry" "k8s.io/klog/v2" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper" @@ -47,11 +46,9 @@ import ( informers "k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1" listers "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1" "k8s.io/kube-aggregator/pkg/controllers" + availabilitymetrics "k8s.io/kube-aggregator/pkg/controllers/status/metrics" ) -// making sure we only register metrics once into legacy registry -var registerIntoLegacyRegistryOnce sync.Once - type certKeyFunc func() ([]byte, []byte) // ServiceResolver knows how to convert a service reference into an actual location. @@ -81,18 +78,18 @@ type AvailableConditionController struct { // To allow injection for testing. 
syncFn func(key string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // map from service-namespace -> service-name -> apiservice names cache map[string]map[string][]string // this lock protects operations on the above cache cacheLock sync.RWMutex // metrics registered into legacy registry - metrics *availabilityMetrics + metrics *availabilitymetrics.Metrics } -// NewAvailableConditionController returns a new AvailableConditionController. -func NewAvailableConditionController( +// New returns a new remote APIService AvailableConditionController. +func New( apiServiceInformer informers.APIServiceInformer, serviceInformer v1informers.ServiceInformer, endpointsInformer v1informers.EndpointsInformer, @@ -100,6 +97,7 @@ func NewAvailableConditionController( proxyTransportDial *transport.DialHolder, proxyCurrentCertKeyContent certKeyFunc, serviceResolver ServiceResolver, + metrics *availabilitymetrics.Metrics, ) (*AvailableConditionController, error) { c := &AvailableConditionController{ apiServiceClient: apiServiceClient, @@ -107,15 +105,16 @@ func NewAvailableConditionController( serviceLister: serviceInformer.Lister(), endpointsLister: endpointsInformer.Lister(), serviceResolver: serviceResolver, - queue: workqueue.NewNamedRateLimitingQueue( + queue: workqueue.NewTypedRateLimitingQueueWithConfig( // We want a fairly tight requeue time. The controller listens to the API, but because it relies on the routability of the // service network, it is possible for an external, non-watchable factor to affect availability. This keeps // the maximum disruption time to a minimum, but it does prevent hot loops. - workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second), - "AvailableConditionController"), + workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 30*time.Second), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "RemoteAvailabilityController"}, + ), proxyTransportDial: proxyTransportDial, proxyCurrentCertKeyContent: proxyCurrentCertKeyContent, - metrics: newAvailabilityMetrics(), + metrics: metrics, } // resync on this one because it is low cardinality and rechecking the actual discovery @@ -147,15 +146,6 @@ func NewAvailableConditionController( c.syncFn = c.sync - // TODO: decouple from legacyregistry - var err error - registerIntoLegacyRegistryOnce.Do(func() { - err = c.metrics.Register(legacyregistry.Register, legacyregistry.CustomRegister) - }) - if err != nil { - return nil, err - } - return c, nil } @@ -169,6 +159,13 @@ func (c *AvailableConditionController) sync(key string) error { return err } + if originalAPIService.Spec.Service == nil { + // handled by the local APIService controller + return nil + } + + apiService := originalAPIService.DeepCopy() + // if a particular transport was specified, use that otherwise build one // construct an http client that will ignore TLS verification (if someone owns the network and messes with your status // that's not so bad) and sets a very short timeout. 
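With the availability metrics moved into the shared `status/metrics` package and the `registerIntoLegacyRegistryOnce` block removed from the controller constructor, registration is left to the caller, which then hands a single `Metrics` value to both the local and the remote availability controllers. A sketch of that wiring, assuming the call site sits in the aggregator server setup (the exact location is not shown in these hunks; the function name below is illustrative):

```go
// Sketch only: the implied metrics wiring after this refactor.
package example

import (
	"k8s.io/component-base/metrics/legacyregistry"
	availabilitymetrics "k8s.io/kube-aggregator/pkg/controllers/status/metrics"
)

func newAvailabilityMetrics() (*availabilitymetrics.Metrics, error) {
	m := availabilitymetrics.New()
	// Register the gauge and counter into the legacy registry exactly once;
	// previously this happened inside NewAvailableConditionController behind
	// a sync.Once.
	if err := m.Register(legacyregistry.Register, legacyregistry.CustomRegister); err != nil {
		return nil, err
	}
	// m is then passed as the final argument to both local.New(...) and
	// remote.New(...) from the files added/renamed in this diff.
	return m, nil
}
```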
This is a best effort GET that provides no additional information @@ -198,21 +195,12 @@ func (c *AvailableConditionController) sync(key string) error { }, } - apiService := originalAPIService.DeepCopy() - availableCondition := apiregistrationv1.APIServiceCondition{ Type: apiregistrationv1.Available, Status: apiregistrationv1.ConditionTrue, LastTransitionTime: metav1.Now(), } - // local API services are always considered available - if apiService.Spec.Service == nil { - apiregistrationv1apihelper.SetAPIServiceCondition(apiService, apiregistrationv1apihelper.NewLocalAvailableAPIServiceCondition()) - _, err := c.updateAPIServiceStatus(originalAPIService, apiService) - return err - } - service, err := c.serviceLister.Services(apiService.Spec.Service.Namespace).Get(apiService.Spec.Service.Name) if apierrors.IsNotFound(err) { availableCondition.Status = apiregistrationv1.ConditionFalse @@ -323,7 +311,7 @@ func (c *AvailableConditionController) sync(key string) error { resp.Body.Close() // we should always been in the 200s or 300s if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices { - errCh <- fmt.Errorf("bad status from %v: %v", discoveryURL, resp.StatusCode) + errCh <- fmt.Errorf("bad status from %v: %d", discoveryURL, resp.StatusCode) return } } @@ -334,7 +322,7 @@ func (c *AvailableConditionController) sync(key string) error { select { case err = <-errCh: if err != nil { - results <- fmt.Errorf("failing or missing response from %v: %v", discoveryURL, err) + results <- fmt.Errorf("failing or missing response from %v: %w", discoveryURL, err) return } @@ -384,7 +372,7 @@ func (c *AvailableConditionController) sync(key string) error { // apiservices. Doing that means we don't want to quickly issue no-op updates. func (c *AvailableConditionController) updateAPIServiceStatus(originalAPIService, newAPIService *apiregistrationv1.APIService) (*apiregistrationv1.APIService, error) { // update this metric on every sync operation to reflect the actual state - c.setUnavailableGauge(newAPIService) + c.metrics.SetUnavailableGauge(newAPIService) if equality.Semantic.DeepEqual(originalAPIService.Status, newAPIService.Status) { return newAPIService, nil @@ -411,7 +399,7 @@ func (c *AvailableConditionController) updateAPIServiceStatus(originalAPIService return nil, err } - c.setUnavailableCounter(originalAPIService, newAPIService) + c.metrics.SetUnavailableCounter(originalAPIService, newAPIService) return newAPIService, nil } @@ -420,14 +408,14 @@ func (c *AvailableConditionController) Run(workers int, stopCh <-chan struct{}) defer utilruntime.HandleCrash() defer c.queue.ShutDown() - klog.Info("Starting AvailableConditionController") - defer klog.Info("Shutting down AvailableConditionController") + klog.Info("Starting RemoteAvailability controller") + defer klog.Info("Shutting down RemoteAvailability controller") // This waits not just for the informers to sync, but for our handlers // to be called; since the handlers are three different ways of // enqueueing the same thing, waiting for this permits the queue to // maximally de-duplicate the entries. 
- if !controllers.WaitForCacheSync("AvailableConditionController", stopCh, c.apiServiceSynced, c.servicesSynced, c.endpointsSynced) { + if !controllers.WaitForCacheSync("RemoteAvailability", stopCh, c.apiServiceSynced, c.servicesSynced, c.endpointsSynced) { return } @@ -451,7 +439,7 @@ func (c *AvailableConditionController) processNextWorkItem() bool { } defer c.queue.Done(key) - err := c.syncFn(key.(string)) + err := c.syncFn(key) if err == nil { c.queue.Forget(key) return true @@ -598,28 +586,3 @@ func (c *AvailableConditionController) deleteEndpoints(obj interface{}) { c.queue.Add(apiService) } } - -// setUnavailableGauge set the metrics so that it reflect the current state base on availability of the given service -func (c *AvailableConditionController) setUnavailableGauge(newAPIService *apiregistrationv1.APIService) { - if apiregistrationv1apihelper.IsAPIServiceConditionTrue(newAPIService, apiregistrationv1.Available) { - c.metrics.SetAPIServiceAvailable(newAPIService.Name) - return - } - - c.metrics.SetAPIServiceUnavailable(newAPIService.Name) -} - -// setUnavailableCounter increases the metrics only if the given service is unavailable and its APIServiceCondition has changed -func (c *AvailableConditionController) setUnavailableCounter(originalAPIService, newAPIService *apiregistrationv1.APIService) { - wasAvailable := apiregistrationv1apihelper.IsAPIServiceConditionTrue(originalAPIService, apiregistrationv1.Available) - isAvailable := apiregistrationv1apihelper.IsAPIServiceConditionTrue(newAPIService, apiregistrationv1.Available) - statusChanged := isAvailable != wasAvailable - - if statusChanged && !isAvailable { - reason := "UnknownReason" - if newCondition := apiregistrationv1apihelper.GetAPIServiceConditionByType(newAPIService, apiregistrationv1.Available); newCondition != nil { - reason = newCondition.Reason - } - c.metrics.UnavailableCounter(newAPIService.Name, reason).Inc() - } -} diff --git a/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/OWNERS b/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/OWNERS new file mode 100644 index 0000000000..a976ef860b --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +labels: + - sig/etcd diff --git a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go index 081dae306f..0ce85af9f1 100644 --- a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go @@ -285,7 +285,10 @@ func (o *openAPI) buildOpenAPISpec(webServices []common.RouteContainer) error { sortParameters(pathItem.Parameters) for _, route := range routes { - op, _ := o.buildOperations(route, inPathCommonParamsMap) + op, err := o.buildOperations(route, inPathCommonParamsMap) + if err != nil { + return err + } sortParameters(op.Parameters) switch strings.ToUpper(route.Method()) { diff --git a/vendor/k8s.io/kubectl/pkg/cmd/apply/apply.go b/vendor/k8s.io/kubectl/pkg/cmd/apply/apply.go index 68d21e968f..c3b31e31fb 100644 --- a/vendor/k8s.io/kubectl/pkg/cmd/apply/apply.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/apply/apply.go @@ -43,7 +43,7 @@ import ( "k8s.io/client-go/util/csaupgrade" "k8s.io/component-base/version" "k8s.io/klog/v2" - "k8s.io/kubectl/pkg/cmd/delete" + cmddelete "k8s.io/kubectl/pkg/cmd/delete" cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/scheme" "k8s.io/kubectl/pkg/util" @@ -61,7 +61,7 @@ type ApplyFlags struct { RecordFlags 
*genericclioptions.RecordFlags PrintFlags *genericclioptions.PrintFlags - DeleteFlags *delete.DeleteFlags + DeleteFlags *cmddelete.DeleteFlags FieldManager string Selector string @@ -84,7 +84,7 @@ type ApplyOptions struct { PrintFlags *genericclioptions.PrintFlags ToPrinter func(string) (printers.ResourcePrinter, error) - DeleteOptions *delete.DeleteOptions + DeleteOptions *cmddelete.DeleteOptions ServerSideApply bool ForceConflicts bool @@ -182,7 +182,7 @@ var ApplySetToolVersion = version.Get().GitVersion func NewApplyFlags(streams genericiooptions.IOStreams) *ApplyFlags { return &ApplyFlags{ RecordFlags: genericclioptions.NewRecordFlags(), - DeleteFlags: delete.NewDeleteFlags("The files that contain the configurations to apply."), + DeleteFlags: cmddelete.NewDeleteFlags("The files that contain the configurations to apply."), PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), Overwrite: true, @@ -681,6 +681,12 @@ See https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts` return cmdutil.AddSourceToErr("creating", info.Source, err) } + // prune nulls when client-side apply does a create to match what will happen when client-side applying an update. + // do this after CreateApplyAnnotation so the annotation matches what will be persisted on an update apply of the same manifest. + if u, ok := info.Object.(runtime.Unstructured); ok { + pruneNullsFromMap(u.UnstructuredContent()) + } + if o.DryRunStrategy != cmdutil.DryRunClient { // Then create the resource and skip the three-way merge obj, err := helper.Create(info.Namespace, true, info.Object) @@ -759,6 +765,29 @@ See https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts` return nil } +func pruneNullsFromMap(data map[string]interface{}) { + for k, v := range data { + if v == nil { + delete(data, k) + } else { + pruneNulls(v) + } + } +} +func pruneNullsFromSlice(data []interface{}) { + for _, v := range data { + pruneNulls(v) + } +} +func pruneNulls(v interface{}) { + switch v := v.(type) { + case map[string]interface{}: + pruneNullsFromMap(v) + case []interface{}: + pruneNullsFromSlice(v) + } +} + // Saves the last-applied-configuration annotation in a separate SSA field manager // to prevent it from being dropped by users who have transitioned to SSA. // diff --git a/vendor/k8s.io/kubectl/pkg/cmd/apply/patcher.go b/vendor/k8s.io/kubectl/pkg/cmd/apply/patcher.go index cbe3b0307d..4903beb812 100644 --- a/vendor/k8s.io/kubectl/pkg/cmd/apply/patcher.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/apply/patcher.go @@ -49,8 +49,6 @@ import ( const ( // maxPatchRetry is the maximum number of conflicts retry for during a patch operation before returning failure maxPatchRetry = 5 - // backOffPeriod is the period to back off when apply patch results in error. - backOffPeriod = 1 * time.Second // how many times we can retry before back off triesBeforeBackOff = 1 // groupVersionKindExtensionKey is the key used to lookup the @@ -59,6 +57,9 @@ const ( groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" ) +// patchRetryBackOffPeriod is the period to back off when apply patch results in error. +var patchRetryBackOffPeriod = 1 * time.Second + var createPatchErrFormat = "creating patch with:\noriginal:\n%s\nmodified:\n%s\ncurrent:\n%s\nfor:" // Patcher defines options to patch OpenAPI objects. 
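The apply.go hunk above prunes explicit nulls from the object before a client-side-apply create, so a create persists the same shape as a later update apply of the same manifest. A standalone illustration of the effect (not part of the diff; the helpers are restated here so the snippet runs on its own):

```go
// Standalone illustration of the null-pruning added to client-side apply.
package main

import "fmt"

func pruneNullsFromMap(data map[string]interface{}) {
	for k, v := range data {
		if v == nil {
			delete(data, k) // drop explicit nulls, recurse into everything else
		} else {
			pruneNulls(v)
		}
	}
}

func pruneNulls(v interface{}) {
	switch v := v.(type) {
	case map[string]interface{}:
		pruneNullsFromMap(v)
	case []interface{}:
		for _, e := range v {
			pruneNulls(e)
		}
	}
}

func main() {
	obj := map[string]interface{}{
		"spec": map[string]interface{}{
			"replicas": int64(3),
			"paused":   nil, // explicit null in the applied manifest
		},
		"status": nil,
	}
	pruneNullsFromMap(obj)
	fmt.Println(obj) // map[spec:map[replicas:3]] – the nulls are gone
}
```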
@@ -363,7 +364,7 @@ func (p *Patcher) Patch(current runtime.Object, modified []byte, source, namespa } for i := 1; i <= p.Retries && apierrors.IsConflict(err); i++ { if i > triesBeforeBackOff { - p.BackOff.Sleep(backOffPeriod) + p.BackOff.Sleep(patchRetryBackOffPeriod) } current, getErr = p.Helper.Get(namespace, name) if getErr != nil { diff --git a/vendor/k8s.io/kubectl/pkg/cmd/get/get.go b/vendor/k8s.io/kubectl/pkg/cmd/get/get.go index f16586994a..a1bc13cb1a 100644 --- a/vendor/k8s.io/kubectl/pkg/cmd/get/get.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/get/get.go @@ -49,7 +49,7 @@ import ( "k8s.io/kubectl/pkg/util/interrupt" "k8s.io/kubectl/pkg/util/slice" "k8s.io/kubectl/pkg/util/templates" - utilpointer "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) // GetOptions contains the input to the get command. @@ -91,8 +91,8 @@ var ( Prints a table of the most important information about the specified resources. You can filter the list using a label selector and the --selector flag. If the - desired resource type is namespaced you will only see results in your current - namespace unless you pass --all-namespaces. + desired resource type is namespaced you will only see results in the current + namespace if you don't specify any namespace. By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resources.`)) @@ -132,7 +132,13 @@ var ( kubectl get rc/web service/frontend pods/web-pod-13je7 # List the 'status' subresource for a single pod - kubectl get pod web-pod-13je7 --subresource status`)) + kubectl get pod web-pod-13je7 --subresource status + + # List all deployments in namespace 'backend' + kubectl get deployments.apps --namespace backend + + # List all pods existing in all namespaces + kubectl get pods --all-namespaces`)) ) const ( @@ -267,9 +273,13 @@ func (o *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri } switch { - case o.Watch || o.WatchOnly: + case o.Watch: + if len(o.SortBy) > 0 { + fmt.Fprintf(o.IOStreams.ErrOut, "warning: --watch requested, --sort-by will be ignored for watch events received\n") + } + case o.WatchOnly: if len(o.SortBy) > 0 { - fmt.Fprintf(o.IOStreams.ErrOut, "warning: --watch or --watch-only requested, --sort-by will be ignored\n") + fmt.Fprintf(o.IOStreams.ErrOut, "warning: --watch-only requested, --sort-by will be ignored\n") } default: if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) { @@ -627,7 +637,7 @@ func (o *GetOptions) watch(f cmdutil.Factory, args []string) error { info := infos[0] mapping := info.ResourceMapping() - outputObjects := utilpointer.BoolPtr(!o.WatchOnly) + outputObjects := ptr.To(!o.WatchOnly) printer, err := o.ToPrinter(mapping, outputObjects, o.AllNamespaces, false) if err != nil { return err diff --git a/vendor/k8s.io/kubectl/pkg/cmd/get/sorter.go b/vendor/k8s.io/kubectl/pkg/cmd/get/sorter.go index 7fe2437e42..e7eea2d614 100644 --- a/vendor/k8s.io/kubectl/pkg/cmd/get/sorter.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/get/sorter.go @@ -32,8 +32,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/printers" "k8s.io/client-go/util/jsonpath" - - "github.com/fvbommel/sortorder" ) // SortingPrinter sorts list types before delegating to another printer. 
@@ -180,7 +178,7 @@ func isLess(i, j reflect.Value) (bool, error) { case reflect.Float32, reflect.Float64: return i.Float() < j.Float(), nil case reflect.String: - return sortorder.NaturalLess(i.String(), j.String()), nil + return i.String() < j.String(), nil case reflect.Pointer: return isLess(i.Elem(), j.Elem()) case reflect.Struct: @@ -274,11 +272,11 @@ func isLess(i, j reflect.Value) (bool, error) { // check if it's a Quantity itypeQuantity, err := resource.ParseQuantity(itype) if err != nil { - return sortorder.NaturalLess(itype, jtype), nil + return itype < jtype, nil } jtypeQuantity, err := resource.ParseQuantity(jtype) if err != nil { - return sortorder.NaturalLess(itype, jtype), nil + return itype < jtype, nil } // Both strings are quantity return itypeQuantity.Cmp(jtypeQuantity) < 0, nil diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go index 6ac8117ef5..2f60808f4b 100644 --- a/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go @@ -29,8 +29,8 @@ import ( goruntime "runtime" "strings" - jsonpatch "github.com/evanphx/json-patch" "github.com/spf13/cobra" + jsonpatch "gopkg.in/evanphx/json-patch.v4" "k8s.io/klog/v2" corev1 "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editor.go b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editor.go index f7601887e7..30667151b7 100644 --- a/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editor.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editor.go @@ -17,17 +17,10 @@ limitations under the License. package editor import ( - "fmt" "io" "os" - "os/exec" - "path/filepath" "runtime" "strings" - - "k8s.io/klog/v2" - - "k8s.io/kubectl/pkg/util/term" ) const ( @@ -96,45 +89,6 @@ func defaultEnvEditor(envs []string) ([]string, bool) { return append(shell, editor), true } -func (e Editor) args(path string) []string { - args := make([]string, len(e.Args)) - copy(args, e.Args) - if e.Shell { - last := args[len(args)-1] - args[len(args)-1] = fmt.Sprintf("%s %q", last, path) - } else { - args = append(args, path) - } - return args -} - -// Launch opens the described or returns an error. The TTY will be protected, and -// SIGQUIT, SIGTERM, and SIGINT will all be trapped. -func (e Editor) Launch(path string) error { - if len(e.Args) == 0 { - return fmt.Errorf("no editor defined, can't open %s", path) - } - abs, err := filepath.Abs(path) - if err != nil { - return err - } - args := e.args(abs) - cmd := exec.Command(args[0], args[1:]...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Stdin = os.Stdin - klog.V(5).Infof("Opening file with editor %v", args) - if err := (term.TTY{In: os.Stdin, TryDev: true}).Safe(cmd.Run); err != nil { - if err, ok := err.(*exec.Error); ok { - if err.Err == exec.ErrNotFound { - return fmt.Errorf("unable to launch the editor %q", strings.Join(e.Args, " ")) - } - } - return fmt.Errorf("there was a problem with the editor %q", strings.Join(e.Args, " ")) - } - return nil -} - // LaunchTempFile reads the provided stream into a temporary file in the given directory // and file prefix, and then invokes Launch with the path of that file. 
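The sorter.go hunk above drops `sortorder.NaturalLess` in favor of plain string comparison, which changes how `--sort-by` orders string fields containing digit runs. A small sketch of the difference (not part of the diff; the pod names are made up):

```go
// Sketch only: ordering difference caused by the isLess change above.
package main

import (
	"fmt"
	"sort"
)

func main() {
	names := []string{"pod-10", "pod-2", "pod-1"}

	sort.Slice(names, func(i, j int) bool {
		return names[i] < names[j] // new behavior: plain lexicographic compare
	})
	fmt.Println(names) // [pod-1 pod-10 pod-2]

	// The previous sortorder.NaturalLess compared digit runs numerically and
	// would have produced [pod-1 pod-2 pod-10] instead.
}
```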
It will return // the contents of the file after launch, any errors that occur, and the path of the diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/editor/launcher_others.go b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/launcher_others.go new file mode 100644 index 0000000000..4298a3329c --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/launcher_others.go @@ -0,0 +1,70 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package editor + +import ( + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "k8s.io/klog/v2" + + "k8s.io/kubectl/pkg/util/term" +) + +func (e Editor) args(path string) []string { + args := make([]string, len(e.Args)) + copy(args, e.Args) + if e.Shell { + last := args[len(args)-1] + args[len(args)-1] = fmt.Sprintf("%s %q", last, path) + } else { + args = append(args, path) + } + return args +} + +// Launch opens the described or returns an error. The TTY will be protected, and +// SIGQUIT, SIGTERM, and SIGINT will all be trapped. +func (e Editor) Launch(path string) error { + if len(e.Args) == 0 { + return fmt.Errorf("no editor defined, can't open %s", path) + } + abs, err := filepath.Abs(path) + if err != nil { + return err + } + args := e.args(abs) + cmd := exec.Command(args[0], args[1:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + klog.V(5).Infof("Opening file with editor %v", args) + if err := (term.TTY{In: os.Stdin, TryDev: true}).Safe(cmd.Run); err != nil { + if errors.Is(err, exec.ErrNotFound) { + return fmt.Errorf("unable to launch the editor %q", strings.Join(e.Args, " ")) + } + return fmt.Errorf("there was a problem with the editor %q", strings.Join(e.Args, " ")) + } + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/editor/launcher_windows.go b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/launcher_windows.go new file mode 100644 index 0000000000..52105dd52a --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/launcher_windows.go @@ -0,0 +1,93 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package editor + +import ( + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + + "k8s.io/klog/v2" + + "k8s.io/kubectl/pkg/util/term" +) + +// Enclose argument in double-quotes. Double each double-quote character as +// an escape sequence. 
+func cmdQuoteArg(arg string) string { + var result strings.Builder + result.WriteString(`"`) + result.WriteString(strings.ReplaceAll(arg, `"`, `""`)) + result.WriteString(`"`) + return result.String() +} + +func (e Editor) args(path string) []string { + args := make([]string, len(e.Args)) + copy(args, e.Args) + if e.Shell { + last := args[len(args)-1] + if args[0] == windowsShell { + // Use double-quotation around whole command line string + // See https://stackoverflow.com/a/6378038 + args[len(args)-1] = fmt.Sprintf(`"%s %s"`, last, cmdQuoteArg(path)) + } else { + args[len(args)-1] = fmt.Sprintf("%s %q", last, path) + } + } else { + args = append(args, path) + } + return args +} + +// Launch opens the described or returns an error. The TTY will be protected, and +// SIGQUIT, SIGTERM, and SIGINT will all be trapped. +func (e Editor) Launch(path string) error { + if len(e.Args) == 0 { + return fmt.Errorf("no editor defined, can't open %s", path) + } + abs, err := filepath.Abs(path) + if err != nil { + return err + } + args := e.args(abs) + var cmd *exec.Cmd + if e.Shell && args[0] == windowsShell { + // Pass all arguments to cmd.exe as one string + // See https://pkg.go.dev/os/exec#Command + cmd = exec.Command(args[0]) + cmd.SysProcAttr = &syscall.SysProcAttr{} + cmd.SysProcAttr.CmdLine = strings.Join(args, " ") + } else { + cmd = exec.Command(args[0], args[1:]...) + } + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + klog.V(5).Infof("Opening file with editor %v", args) + if err := (term.TTY{In: os.Stdin, TryDev: true}).Safe(cmd.Run); err != nil { + if errors.Is(err, exec.ErrNotFound) { + return fmt.Errorf("unable to launch the editor %q", strings.Join(e.Args, " ")) + } + return fmt.Errorf("there was a problem with the editor %q", strings.Join(e.Args, " ")) + } + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go index 2218b9f5b3..bd291a8c91 100644 --- a/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go @@ -27,9 +27,9 @@ import ( "strings" "time" - jsonpatch "github.com/evanphx/json-patch" "github.com/spf13/cobra" "github.com/spf13/pflag" + jsonpatch "gopkg.in/evanphx/json-patch.v4" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" diff --git a/vendor/k8s.io/kubectl/pkg/cmd/wait/condition.go b/vendor/k8s.io/kubectl/pkg/cmd/wait/condition.go new file mode 100644 index 0000000000..7dec0de2e4 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/wait/condition.go @@ -0,0 +1,197 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package wait + +import ( + "context" + "errors" + "fmt" + "io" + "strings" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + "k8s.io/kubectl/pkg/util/interrupt" +) + +// ConditionalWait hold information to check an API status condition +type ConditionalWait struct { + conditionName string + conditionStatus string + // errOut is written to if an error occurs + errOut io.Writer +} + +// IsConditionMet is a conditionfunc for waiting on an API condition to be met +func (w ConditionalWait) IsConditionMet(ctx context.Context, info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { + return getObjAndCheckCondition(ctx, info, o, w.isConditionMet, w.checkCondition) +} + +func (w ConditionalWait) checkCondition(obj *unstructured.Unstructured) (bool, error) { + conditions, found, err := unstructured.NestedSlice(obj.Object, "status", "conditions") + if err != nil { + return false, err + } + if !found { + return false, nil + } + for _, conditionUncast := range conditions { + condition := conditionUncast.(map[string]interface{}) + name, found, err := unstructured.NestedString(condition, "type") + if !found || err != nil || !strings.EqualFold(name, w.conditionName) { + continue + } + status, found, err := unstructured.NestedString(condition, "status") + if !found || err != nil { + continue + } + generation, found, _ := unstructured.NestedInt64(obj.Object, "metadata", "generation") + if found { + observedGeneration, found := getObservedGeneration(obj, condition) + if found && observedGeneration < generation { + return false, nil + } + } + return strings.EqualFold(status, w.conditionStatus), nil + } + + return false, nil +} + +func (w ConditionalWait) isConditionMet(event watch.Event) (bool, error) { + if event.Type == watch.Error { + // keep waiting in the event we see an error - we expect the watch to be closed by + // the server + err := apierrors.FromObject(event.Object) + fmt.Fprintf(w.errOut, "error: An error occurred while waiting for the condition to be satisfied: %v", err) + return false, nil + } + if event.Type == watch.Deleted { + // this will chain back out, result in another get and an return false back up the chain + return false, nil + } + obj := event.Object.(*unstructured.Unstructured) + return w.checkCondition(obj) +} + +type isCondMetFunc func(event watch.Event) (bool, error) +type checkCondFunc func(obj *unstructured.Unstructured) (bool, error) + +// getObjAndCheckCondition will make a List query to the API server to get the object and check if the condition is met using check function. 
+// If the condition is not met, it will make a Watch query to the server and pass in the condMet function +func getObjAndCheckCondition(ctx context.Context, info *resource.Info, o *WaitOptions, condMet isCondMetFunc, check checkCondFunc) (runtime.Object, bool, error) { + if len(info.Name) == 0 { + return info.Object, false, fmt.Errorf("resource name must be provided") + } + + endTime := time.Now().Add(o.Timeout) + timeout := time.Until(endTime) + errWaitTimeoutWithName := extendErrWaitTimeout(wait.ErrWaitTimeout, info) // nolint:staticcheck // SA1019 + if o.Timeout == 0 { + // If timeout is zero we will fetch the object(s) once only and check + gottenObj, initObjGetErr := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Get(context.Background(), info.Name, metav1.GetOptions{}) + if initObjGetErr != nil { + return nil, false, initObjGetErr + } + if gottenObj == nil { + return nil, false, fmt.Errorf("condition not met for %s", info.ObjectName()) + } + conditionCheck, err := check(gottenObj) + if err != nil { + return gottenObj, false, err + } + if !conditionCheck { + return gottenObj, false, fmt.Errorf("condition not met for %s", info.ObjectName()) + } + return gottenObj, true, nil + } + if timeout < 0 { + // we're out of time + return info.Object, false, errWaitTimeoutWithName + } + + mapping := info.ResourceMapping() // used to pass back meaningful errors if object disappears + fieldSelector := fields.OneTermEqualSelector("metadata.name", info.Name).String() + lw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fieldSelector + return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fieldSelector + return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(context.TODO(), options) + }, + } + + // this function is used to refresh the cache to prevent timeout waits on resources that have disappeared + preconditionFunc := func(store cache.Store) (bool, error) { + _, exists, err := store.Get(&metav1.ObjectMeta{Namespace: info.Namespace, Name: info.Name}) + if err != nil { + return true, err + } + if !exists { + return true, apierrors.NewNotFound(mapping.Resource.GroupResource(), info.Name) + } + + return false, nil + } + + intrCtx, cancel := context.WithCancel(ctx) + defer cancel() + var result runtime.Object + intr := interrupt.New(nil, cancel) + err := intr.Run(func() error { + ev, err := watchtools.UntilWithSync(intrCtx, lw, &unstructured.Unstructured{}, preconditionFunc, watchtools.ConditionFunc(condMet)) + if ev != nil { + result = ev.Object + } + if errors.Is(err, context.DeadlineExceeded) { + return errWaitTimeoutWithName + } + return err + }) + if err != nil { + if errors.Is(err, wait.ErrWaitTimeout) { // nolint:staticcheck // SA1019 + return result, false, errWaitTimeoutWithName + } + return result, false, err + } + + return result, true, nil +} + +func extendErrWaitTimeout(err error, info *resource.Info) error { + return fmt.Errorf("%s on %s/%s", err.Error(), info.Mapping.Resource.Resource, info.Name) +} + +func getObservedGeneration(obj *unstructured.Unstructured, condition map[string]interface{}) (int64, bool) { + conditionObservedGeneration, found, _ := unstructured.NestedInt64(condition, "observedGeneration") + if found { + return conditionObservedGeneration, true + } + statusObservedGeneration, found, _ 
:= unstructured.NestedInt64(obj.Object, "status", "observedGeneration") + return statusObservedGeneration, found +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/wait/create.go b/vendor/k8s.io/kubectl/pkg/cmd/wait/create.go new file mode 100644 index 0000000000..1d5eca7e80 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/wait/create.go @@ -0,0 +1,33 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/resource" +) + +// IsCreated is a condition func for waiting for something to be created +func IsCreated(ctx context.Context, info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { + if len(info.Name) == 0 || info.Object == nil { + return nil, false, fmt.Errorf("resource name must be provided") + } + return info.Object, true, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/wait/delete.go b/vendor/k8s.io/kubectl/pkg/cmd/wait/delete.go new file mode 100644 index 0000000000..f20cbb5e42 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/wait/delete.go @@ -0,0 +1,144 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package wait + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + "k8s.io/kubectl/pkg/util/interrupt" +) + +// IsDeleted is a condition func for waiting for something to be deleted +func IsDeleted(ctx context.Context, info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { + if len(info.Name) == 0 { + return info.Object, false, fmt.Errorf("resource name must be provided") + } + + gottenObj, initObjGetErr := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Get(ctx, info.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(initObjGetErr) { + return info.Object, true, nil + } + if initObjGetErr != nil { + // TODO this could do something slightly fancier if we wish + return info.Object, false, initObjGetErr + } + resourceLocation := ResourceLocation{ + GroupResource: info.Mapping.Resource.GroupResource(), + Namespace: gottenObj.GetNamespace(), + Name: gottenObj.GetName(), + } + if uid, ok := o.UIDMap[resourceLocation]; ok { + if gottenObj.GetUID() != uid { + return gottenObj, true, nil + } + } + + endTime := time.Now().Add(o.Timeout) + timeout := time.Until(endTime) + errWaitTimeoutWithName := extendErrWaitTimeout(wait.ErrWaitTimeout, info) // nolint:staticcheck // SA1019 + if o.Timeout == 0 { + // If timeout is zero check if the object exists once only + if gottenObj == nil { + return nil, true, nil + } + return gottenObj, false, fmt.Errorf("condition not met for %s", info.ObjectName()) + } + if timeout < 0 { + // we're out of time + return info.Object, false, errWaitTimeoutWithName + } + + fieldSelector := fields.OneTermEqualSelector("metadata.name", info.Name).String() + lw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fieldSelector + return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fieldSelector + return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(ctx, options) + }, + } + + // this function is used to refresh the cache to prevent timeout waits on resources that have disappeared + preconditionFunc := func(store cache.Store) (bool, error) { + _, exists, err := store.Get(&metav1.ObjectMeta{Namespace: info.Namespace, Name: info.Name}) + if err != nil { + return true, err + } + if !exists { + // since we're looking for it to disappear we just return here if it no longer exists + return true, nil + } + + return false, nil + } + + intrCtx, cancel := context.WithCancel(ctx) + defer cancel() + intr := interrupt.New(nil, cancel) + err := intr.Run(func() error { + _, err := watchtools.UntilWithSync(intrCtx, lw, &unstructured.Unstructured{}, preconditionFunc, Wait{errOut: o.ErrOut}.IsDeleted) + if errors.Is(err, context.DeadlineExceeded) { + return errWaitTimeoutWithName + } + return err + }) + if err != nil { + if errors.Is(err, wait.ErrWaitTimeout) { // nolint:staticcheck // SA1019 + return gottenObj, false, errWaitTimeoutWithName + } + return gottenObj, false, err + } + + return 
gottenObj, true, nil +} + +// Wait has helper methods for handling watches, including error handling. +type Wait struct { + errOut io.Writer +} + +// IsDeleted returns true if the object is deleted. It prints any errors it encounters. +func (w Wait) IsDeleted(event watch.Event) (bool, error) { + switch event.Type { + case watch.Error: + // keep waiting in the event we see an error - we expect the watch to be closed by + // the server if the error is unrecoverable. + err := apierrors.FromObject(event.Object) + fmt.Fprintf(w.errOut, "error: An error occurred while waiting for the object to be deleted: %v", err) + return false, nil + case watch.Deleted: + return true, nil + default: + return false, nil + } +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/wait/json.go b/vendor/k8s.io/kubectl/pkg/cmd/wait/json.go new file mode 100644 index 0000000000..ac573b6cd1 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/wait/json.go @@ -0,0 +1,120 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "errors" + "fmt" + "io" + "reflect" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/util/jsonpath" +) + +// JSONPathWait holds a JSONPath Parser which has the ability +// to check for the JSONPath condition and compare with the API server provided JSON output. 
+type JSONPathWait struct { + matchAnyValue bool + jsonPathValue string + jsonPathParser *jsonpath.JSONPath + // errOut is written to if an error occurs + errOut io.Writer +} + +// IsJSONPathConditionMet fulfills the requirements of the interface ConditionFunc which provides condition check +func (j JSONPathWait) IsJSONPathConditionMet(ctx context.Context, info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { + return getObjAndCheckCondition(ctx, info, o, j.isJSONPathConditionMet, j.checkCondition) +} + +// isJSONPathConditionMet is a helper function of IsJSONPathConditionMet +// which check the watch event and check if a JSONPathWait condition is met +func (j JSONPathWait) isJSONPathConditionMet(event watch.Event) (bool, error) { + if event.Type == watch.Error { + // keep waiting in the event we see an error - we expect the watch to be closed by + // the server + err := apierrors.FromObject(event.Object) + fmt.Fprintf(j.errOut, "error: An error occurred while waiting for the condition to be satisfied: %v", err) + return false, nil + } + if event.Type == watch.Deleted { + // this will chain back out, result in another get and an return false back up the chain + return false, nil + } + // event runtime Object can be safely asserted to Unstructed + // because we are working with dynamic client + obj := event.Object.(*unstructured.Unstructured) + return j.checkCondition(obj) +} + +// checkCondition uses JSONPath parser to parse the JSON received from the API server +// and check if it matches the desired condition +func (j JSONPathWait) checkCondition(obj *unstructured.Unstructured) (bool, error) { + queryObj := obj.UnstructuredContent() + parseResults, err := j.jsonPathParser.FindResults(queryObj) + if err != nil { + return false, err + } + if len(parseResults) == 0 || len(parseResults[0]) == 0 { + return false, nil + } + if err := verifyParsedJSONPath(parseResults); err != nil { + return false, err + } + if j.matchAnyValue { + return true, nil + } + isConditionMet, err := compareResults(parseResults[0][0], j.jsonPathValue) + if err != nil { + return false, err + } + return isConditionMet, nil +} + +// verifyParsedJSONPath verifies the JSON received from the API server is valid. +// It will only accept a single JSON +func verifyParsedJSONPath(results [][]reflect.Value) error { + if len(results) > 1 { + return errors.New("given jsonpath expression matches more than one list") + } + if len(results[0]) > 1 { + return errors.New("given jsonpath expression matches more than one value") + } + return nil +} + +// compareResults will compare the reflect.Value from the result parsed by the +// JSONPath parser with the expected value given by the value +// +// Since this is coming from an unstructured this can only ever be a primitive, +// map[string]interface{}, or []interface{}. 
+// We do not support the last two and rely on fmt to handle conversion to string +// and compare the result with user input +func compareResults(r reflect.Value, expectedVal string) (bool, error) { + switch r.Interface().(type) { + case map[string]interface{}, []interface{}: + return false, errors.New("jsonpath leads to a nested object or list which is not supported") + } + s := fmt.Sprintf("%v", r.Interface()) + return strings.TrimSpace(s) == strings.TrimSpace(expectedVal), nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/wait/wait.go b/vendor/k8s.io/kubectl/pkg/cmd/wait/wait.go index e211ccec6c..a247ebb0e9 100644 --- a/vendor/k8s.io/kubectl/pkg/cmd/wait/wait.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/wait/wait.go @@ -21,33 +21,26 @@ import ( "errors" "fmt" "io" - "reflect" "strings" "time" "github.com/spf13/cobra" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericiooptions" "k8s.io/cli-runtime/pkg/printers" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/dynamic" - "k8s.io/client-go/tools/cache" watchtools "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/jsonpath" cmdget "k8s.io/kubectl/pkg/cmd/get" cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/util/i18n" - "k8s.io/kubectl/pkg/util/interrupt" "k8s.io/kubectl/pkg/util/templates" ) @@ -77,7 +70,7 @@ var ( # Wait for pod "busybox1" to be Ready kubectl wait --for='jsonpath={.status.conditions[?(@.type=="Ready")].status}=True' pod/busybox1 - # Wait for the service "loadbalancer" to have ingress. 
+ # Wait for the service "loadbalancer" to have ingress kubectl wait --for=jsonpath='{.status.loadBalancer.ingress}' service/loadbalancer # Wait for the pod "busybox1" to be deleted, with a timeout of 60s, after having issued the "delete" command @@ -194,11 +187,16 @@ func (flags *WaitFlags) ToOptions(args []string) (*WaitOptions, error) { } func conditionFuncFor(condition string, errOut io.Writer) (ConditionFunc, error) { - if strings.ToLower(condition) == "delete" { + lowercaseCond := strings.ToLower(condition) + switch { + case lowercaseCond == "delete": return IsDeleted, nil - } - if strings.HasPrefix(condition, "condition=") { - conditionName := condition[len("condition="):] + + case lowercaseCond == "create": + return IsCreated, nil + + case strings.HasPrefix(condition, "condition="): + conditionName := strings.TrimPrefix(condition, "condition=") conditionValue := "true" if equalsIndex := strings.Index(conditionName, "="); equalsIndex != -1 { conditionValue = conditionName[equalsIndex+1:] @@ -210,8 +208,8 @@ func conditionFuncFor(condition string, errOut io.Writer) (ConditionFunc, error) conditionStatus: conditionValue, errOut: errOut, }.IsConditionMet, nil - } - if strings.HasPrefix(condition, "jsonpath=") { + + case strings.HasPrefix(condition, "jsonpath="): jsonPathInput := strings.TrimPrefix(condition, "jsonpath=") jsonPathExp, jsonPathValue, err := processJSONPathInput(jsonPathInput) if err != nil { @@ -320,6 +318,31 @@ func (o *WaitOptions) RunWait() error { ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), o.Timeout) defer cancel() + if strings.ToLower(o.ForCondition) == "create" { + // TODO(soltysh): this is not ideal solution, because we're polling every .5s, + // and we have to use ResourceFinder, which contains the resource name. + // In the long run, we should expose resource information from ResourceFinder, + // or functions from ResourceBuilder for parsing those. Lastly, this poll + // should be replaced with a ListWatch cache. 
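The new `create` condition above simply polls the ResourceFinder every 500ms until the lookup stops returning NotFound. As a rough, standalone sketch of the same pattern (the dynamic client, the GVR, and the helper name are illustrative assumptions, not taken from this diff):

```go
package example

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
)

// waitForCreate polls every 500ms until the named object exists or the timeout
// expires, mirroring the --for=create poll loop above.
func waitForCreate(ctx context.Context, client dynamic.Interface, gvr schema.GroupVersionResource, namespace, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, timeout, true, func(ctx context.Context) (bool, error) {
		_, err := client.Resource(gvr).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil // not there yet, keep polling
		}
		if err != nil {
			return false, err // any other error aborts the wait
		}
		return true, nil
	})
}
```

On the CLI this is the machinery behind `kubectl wait --for=create <type>/<name> --timeout=<duration>`.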
+ if err := wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, o.Timeout, true, func(context.Context) (done bool, err error) { + visitErr := o.ResourceFinder.Do().Visit(func(info *resource.Info, err error) error { + return nil + }) + if apierrors.IsNotFound(visitErr) { + return false, nil + } + if visitErr != nil { + return false, visitErr + } + return true, nil + }); err != nil { + if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("%s", wait.ErrWaitTimeout.Error()) // nolint:staticcheck // SA1019 + } + return err + } + } + visitCount := 0 visitFunc := func(info *resource.Info, err error) error { if err != nil { @@ -352,356 +375,3 @@ func (o *WaitOptions) RunWait() error { } return err } - -// IsDeleted is a condition func for waiting for something to be deleted -func IsDeleted(ctx context.Context, info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { - if len(info.Name) == 0 { - return info.Object, false, fmt.Errorf("resource name must be provided") - } - - gottenObj, initObjGetErr := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Get(context.Background(), info.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(initObjGetErr) { - return info.Object, true, nil - } - if initObjGetErr != nil { - // TODO this could do something slightly fancier if we wish - return info.Object, false, initObjGetErr - } - resourceLocation := ResourceLocation{ - GroupResource: info.Mapping.Resource.GroupResource(), - Namespace: gottenObj.GetNamespace(), - Name: gottenObj.GetName(), - } - if uid, ok := o.UIDMap[resourceLocation]; ok { - if gottenObj.GetUID() != uid { - return gottenObj, true, nil - } - } - - endTime := time.Now().Add(o.Timeout) - timeout := time.Until(endTime) - errWaitTimeoutWithName := extendErrWaitTimeout(wait.ErrWaitTimeout, info) - if o.Timeout == 0 { - // If timeout is zero check if the object exists once only - if gottenObj == nil { - return nil, true, nil - } - return gottenObj, false, fmt.Errorf("condition not met for %s", info.ObjectName()) - } - if timeout < 0 { - // we're out of time - return info.Object, false, errWaitTimeoutWithName - } - - fieldSelector := fields.OneTermEqualSelector("metadata.name", info.Name).String() - lw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = fieldSelector - return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = fieldSelector - return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(context.TODO(), options) - }, - } - - // this function is used to refresh the cache to prevent timeout waits on resources that have disappeared - preconditionFunc := func(store cache.Store) (bool, error) { - _, exists, err := store.Get(&metav1.ObjectMeta{Namespace: info.Namespace, Name: info.Name}) - if err != nil { - return true, err - } - if !exists { - // since we're looking for it to disappear we just return here if it no longer exists - return true, nil - } - - return false, nil - } - - intrCtx, cancel := context.WithCancel(ctx) - defer cancel() - intr := interrupt.New(nil, cancel) - err := intr.Run(func() error { - _, err := watchtools.UntilWithSync(intrCtx, lw, &unstructured.Unstructured{}, preconditionFunc, Wait{errOut: o.ErrOut}.IsDeleted) - if errors.Is(err, context.DeadlineExceeded) { - return errWaitTimeoutWithName - } - return err - }) - if 
err != nil { - if err == wait.ErrWaitTimeout { - return gottenObj, false, errWaitTimeoutWithName - } - return gottenObj, false, err - } - - return gottenObj, true, nil -} - -// Wait has helper methods for handling watches, including error handling. -type Wait struct { - errOut io.Writer -} - -// IsDeleted returns true if the object is deleted. It prints any errors it encounters. -func (w Wait) IsDeleted(event watch.Event) (bool, error) { - switch event.Type { - case watch.Error: - // keep waiting in the event we see an error - we expect the watch to be closed by - // the server if the error is unrecoverable. - err := apierrors.FromObject(event.Object) - fmt.Fprintf(w.errOut, "error: An error occurred while waiting for the object to be deleted: %v", err) - return false, nil - case watch.Deleted: - return true, nil - default: - return false, nil - } -} - -type isCondMetFunc func(event watch.Event) (bool, error) -type checkCondFunc func(obj *unstructured.Unstructured) (bool, error) - -// getObjAndCheckCondition will make a List query to the API server to get the object and check if the condition is met using check function. -// If the condition is not met, it will make a Watch query to the server and pass in the condMet function -func getObjAndCheckCondition(ctx context.Context, info *resource.Info, o *WaitOptions, condMet isCondMetFunc, check checkCondFunc) (runtime.Object, bool, error) { - if len(info.Name) == 0 { - return info.Object, false, fmt.Errorf("resource name must be provided") - } - - endTime := time.Now().Add(o.Timeout) - timeout := time.Until(endTime) - errWaitTimeoutWithName := extendErrWaitTimeout(wait.ErrWaitTimeout, info) - if o.Timeout == 0 { - // If timeout is zero we will fetch the object(s) once only and check - gottenObj, initObjGetErr := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Get(context.Background(), info.Name, metav1.GetOptions{}) - if initObjGetErr != nil { - return nil, false, initObjGetErr - } - if gottenObj == nil { - return nil, false, fmt.Errorf("condition not met for %s", info.ObjectName()) - } - conditionCheck, err := check(gottenObj) - if err != nil { - return gottenObj, false, err - } - if conditionCheck == false { - return gottenObj, false, fmt.Errorf("condition not met for %s", info.ObjectName()) - } - return gottenObj, true, nil - } - if timeout < 0 { - // we're out of time - return info.Object, false, errWaitTimeoutWithName - } - - mapping := info.ResourceMapping() // used to pass back meaningful errors if object disappears - fieldSelector := fields.OneTermEqualSelector("metadata.name", info.Name).String() - lw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = fieldSelector - return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = fieldSelector - return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(context.TODO(), options) - }, - } - - // this function is used to refresh the cache to prevent timeout waits on resources that have disappeared - preconditionFunc := func(store cache.Store) (bool, error) { - _, exists, err := store.Get(&metav1.ObjectMeta{Namespace: info.Namespace, Name: info.Name}) - if err != nil { - return true, err - } - if !exists { - return true, apierrors.NewNotFound(mapping.Resource.GroupResource(), info.Name) - } - - return false, nil - } - - 
intrCtx, cancel := context.WithCancel(ctx) - defer cancel() - var result runtime.Object - intr := interrupt.New(nil, cancel) - err := intr.Run(func() error { - ev, err := watchtools.UntilWithSync(intrCtx, lw, &unstructured.Unstructured{}, preconditionFunc, watchtools.ConditionFunc(condMet)) - if ev != nil { - result = ev.Object - } - if errors.Is(err, context.DeadlineExceeded) { - return errWaitTimeoutWithName - } - return err - }) - if err != nil { - if err == wait.ErrWaitTimeout { - return result, false, errWaitTimeoutWithName - } - return result, false, err - } - - return result, true, nil -} - -// ConditionalWait hold information to check an API status condition -type ConditionalWait struct { - conditionName string - conditionStatus string - // errOut is written to if an error occurs - errOut io.Writer -} - -// IsConditionMet is a conditionfunc for waiting on an API condition to be met -func (w ConditionalWait) IsConditionMet(ctx context.Context, info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { - return getObjAndCheckCondition(ctx, info, o, w.isConditionMet, w.checkCondition) -} - -func (w ConditionalWait) checkCondition(obj *unstructured.Unstructured) (bool, error) { - conditions, found, err := unstructured.NestedSlice(obj.Object, "status", "conditions") - if err != nil { - return false, err - } - if !found { - return false, nil - } - for _, conditionUncast := range conditions { - condition := conditionUncast.(map[string]interface{}) - name, found, err := unstructured.NestedString(condition, "type") - if !found || err != nil || !strings.EqualFold(name, w.conditionName) { - continue - } - status, found, err := unstructured.NestedString(condition, "status") - if !found || err != nil { - continue - } - generation, found, _ := unstructured.NestedInt64(obj.Object, "metadata", "generation") - if found { - observedGeneration, found := getObservedGeneration(obj, condition) - if found && observedGeneration < generation { - return false, nil - } - } - return strings.EqualFold(status, w.conditionStatus), nil - } - - return false, nil -} - -func (w ConditionalWait) isConditionMet(event watch.Event) (bool, error) { - if event.Type == watch.Error { - // keep waiting in the event we see an error - we expect the watch to be closed by - // the server - err := apierrors.FromObject(event.Object) - fmt.Fprintf(w.errOut, "error: An error occurred while waiting for the condition to be satisfied: %v", err) - return false, nil - } - if event.Type == watch.Deleted { - // this will chain back out, result in another get and an return false back up the chain - return false, nil - } - obj := event.Object.(*unstructured.Unstructured) - return w.checkCondition(obj) -} - -func extendErrWaitTimeout(err error, info *resource.Info) error { - return fmt.Errorf("%s on %s/%s", err.Error(), info.Mapping.Resource.Resource, info.Name) -} - -func getObservedGeneration(obj *unstructured.Unstructured, condition map[string]interface{}) (int64, bool) { - conditionObservedGeneration, found, _ := unstructured.NestedInt64(condition, "observedGeneration") - if found { - return conditionObservedGeneration, true - } - statusObservedGeneration, found, _ := unstructured.NestedInt64(obj.Object, "status", "observedGeneration") - return statusObservedGeneration, found -} - -// JSONPathWait holds a JSONPath Parser which has the ability -// to check for the JSONPath condition and compare with the API server provided JSON output. 
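The JSONPath wait described in this comment boils down to: parse a brace-wrapped expression, evaluate it against the object's unstructured content, require exactly one primitive result, and compare it to the expected value as a trimmed string. A simplified standalone sketch of that flow (not the vendored implementation; the function name is made up):

```go
package example

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/util/jsonpath"
)

// jsonPathMatches evaluates expr (e.g. "{.status.phase}") against obj and
// compares the single primitive result with the expected string.
func jsonPathMatches(obj *unstructured.Unstructured, expr, expected string) (bool, error) {
	p := jsonpath.New("condition")
	if err := p.Parse(expr); err != nil { // expr must be brace-wrapped
		return false, err
	}
	results, err := p.FindResults(obj.UnstructuredContent())
	if err != nil {
		return false, err
	}
	if len(results) == 0 || len(results[0]) == 0 {
		return false, nil // nothing matched yet; the caller keeps waiting
	}
	if len(results) > 1 || len(results[0]) > 1 {
		return false, fmt.Errorf("expression matches more than one value")
	}
	v := results[0][0].Interface()
	switch v.(type) {
	case map[string]interface{}, []interface{}:
		return false, fmt.Errorf("expression leads to a nested object or list")
	}
	return strings.TrimSpace(fmt.Sprintf("%v", v)) == strings.TrimSpace(expected), nil
}
```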
-type JSONPathWait struct { - matchAnyValue bool - jsonPathValue string - jsonPathParser *jsonpath.JSONPath - // errOut is written to if an error occurs - errOut io.Writer -} - -// IsJSONPathConditionMet fulfills the requirements of the interface ConditionFunc which provides condition check -func (j JSONPathWait) IsJSONPathConditionMet(ctx context.Context, info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { - return getObjAndCheckCondition(ctx, info, o, j.isJSONPathConditionMet, j.checkCondition) -} - -// isJSONPathConditionMet is a helper function of IsJSONPathConditionMet -// which check the watch event and check if a JSONPathWait condition is met -func (j JSONPathWait) isJSONPathConditionMet(event watch.Event) (bool, error) { - if event.Type == watch.Error { - // keep waiting in the event we see an error - we expect the watch to be closed by - // the server - err := apierrors.FromObject(event.Object) - fmt.Fprintf(j.errOut, "error: An error occurred while waiting for the condition to be satisfied: %v", err) - return false, nil - } - if event.Type == watch.Deleted { - // this will chain back out, result in another get and an return false back up the chain - return false, nil - } - // event runtime Object can be safely asserted to Unstructed - // because we are working with dynamic client - obj := event.Object.(*unstructured.Unstructured) - return j.checkCondition(obj) -} - -// checkCondition uses JSONPath parser to parse the JSON received from the API server -// and check if it matches the desired condition -func (j JSONPathWait) checkCondition(obj *unstructured.Unstructured) (bool, error) { - queryObj := obj.UnstructuredContent() - parseResults, err := j.jsonPathParser.FindResults(queryObj) - if err != nil { - return false, err - } - if len(parseResults) == 0 || len(parseResults[0]) == 0 { - return false, nil - } - if err := verifyParsedJSONPath(parseResults); err != nil { - return false, err - } - if j.matchAnyValue { - return true, nil - } - isConditionMet, err := compareResults(parseResults[0][0], j.jsonPathValue) - if err != nil { - return false, err - } - return isConditionMet, nil -} - -// verifyParsedJSONPath verifies the JSON received from the API server is valid. -// It will only accept a single JSON -func verifyParsedJSONPath(results [][]reflect.Value) error { - if len(results) > 1 { - return errors.New("given jsonpath expression matches more than one list") - } - if len(results[0]) > 1 { - return errors.New("given jsonpath expression matches more than one value") - } - return nil -} - -// compareResults will compare the reflect.Value from the result parsed by the -// JSONPath parser with the expected value given by the value -// -// Since this is coming from an unstructured this can only ever be a primitive, -// map[string]interface{}, or []interface{}. 
-// We do not support the last two and rely on fmt to handle conversion to string -// and compare the result with user input -func compareResults(r reflect.Value, expectedVal string) (bool, error) { - switch r.Interface().(type) { - case map[string]interface{}, []interface{}: - return false, errors.New("jsonpath leads to a nested object or list which is not supported") - } - s := fmt.Sprintf("%v", r.Interface()) - return strings.TrimSpace(s) == strings.TrimSpace(expectedVal), nil -} diff --git a/vendor/k8s.io/kubectl/pkg/describe/describe.go b/vendor/k8s.io/kubectl/pkg/describe/describe.go index d2fdb31722..fc76f23247 100644 --- a/vendor/k8s.io/kubectl/pkg/describe/describe.go +++ b/vendor/k8s.io/kubectl/pkg/describe/describe.go @@ -45,14 +45,13 @@ import ( discoveryv1beta1 "k8s.io/api/discovery/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" policyv1 "k8s.io/api/policy/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" schedulingv1 "k8s.io/api/scheduling/v1" storagev1 "k8s.io/api/storage/v1" - storagev1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1beta1 "k8s.io/api/storage/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" @@ -216,8 +215,8 @@ func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]ResourceDescr {Group: networkingv1beta1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c}, {Group: networkingv1.GroupName, Kind: "Ingress"}: &IngressDescriber{c}, {Group: networkingv1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c}, - {Group: networkingv1alpha1.GroupName, Kind: "ServiceCIDR"}: &ServiceCIDRDescriber{c}, - {Group: networkingv1alpha1.GroupName, Kind: "IPAddress"}: &IPAddressDescriber{c}, + {Group: networkingv1beta1.GroupName, Kind: "ServiceCIDR"}: &ServiceCIDRDescriber{c}, + {Group: networkingv1beta1.GroupName, Kind: "IPAddress"}: &IPAddressDescriber{c}, {Group: batchv1.GroupName, Kind: "Job"}: &JobDescriber{c}, {Group: batchv1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c}, {Group: batchv1beta1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c}, @@ -228,7 +227,7 @@ func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]ResourceDescr {Group: certificatesv1beta1.GroupName, Kind: "CertificateSigningRequest"}: &CertificateSigningRequestDescriber{c}, {Group: storagev1.GroupName, Kind: "StorageClass"}: &StorageClassDescriber{c}, {Group: storagev1.GroupName, Kind: "CSINode"}: &CSINodeDescriber{c}, - {Group: storagev1alpha1.GroupName, Kind: "VolumeAttributesClass"}: &VolumeAttributesClassDescriber{c}, + {Group: storagev1beta1.GroupName, Kind: "VolumeAttributesClass"}: &VolumeAttributesClassDescriber{c}, {Group: policyv1beta1.GroupName, Kind: "PodDisruptionBudget"}: &PodDisruptionBudgetDescriber{c}, {Group: policyv1.GroupName, Kind: "PodDisruptionBudget"}: &PodDisruptionBudgetDescriber{c}, {Group: rbacv1.GroupName, Kind: "Role"}: &RoleDescriber{c}, @@ -811,7 +810,7 @@ func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) { } printLabelsMultiline(w, "Labels", pod.Labels) printAnnotationsMultiline(w, "Annotations", pod.Annotations) - if pod.DeletionTimestamp != nil { + if pod.DeletionTimestamp != nil && pod.Status.Phase != corev1.PodFailed && pod.Status.Phase != corev1.PodSucceeded { w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", 
translateTimestampSince(*pod.DeletionTimestamp)) w.Write(LEVEL_0, "Termination Grace Period:\t%ds\n", *pod.DeletionGracePeriodSeconds) } else { @@ -2603,7 +2602,9 @@ func (i *IngressDescriber) Describe(namespace, name string, describerSettings De } func (i *IngressDescriber) describeBackendV1beta1(ns string, backend *networkingv1beta1.IngressBackend) string { - endpoints, err := i.client.CoreV1().Endpoints(ns).Get(context.TODO(), backend.ServiceName, metav1.GetOptions{}) + endpointSliceList, err := i.client.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, backend.ServiceName), + }) if err != nil { return fmt.Sprintf("<error: %v>", err) } @@ -2625,20 +2626,22 @@ func (i *IngressDescriber) describeBackendV1beta1(ns string, backend *networking } } } - return formatEndpoints(endpoints, sets.NewString(spName)) + return formatEndpointSlices(endpointSliceList.Items, sets.New(spName)) } func (i *IngressDescriber) describeBackendV1(ns string, backend *networkingv1.IngressBackend) string { if backend.Service != nil { sb := serviceBackendStringer(backend.Service) - endpoints, err := i.client.CoreV1().Endpoints(ns).Get(context.TODO(), backend.Service.Name, metav1.GetOptions{}) + endpointSliceList, err := i.client.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, backend.Service.Name), + }) if err != nil { return fmt.Sprintf("%v (<error: %v>)", sb, err) } service, err := i.client.CoreV1().Services(ns).Get(context.TODO(), backend.Service.Name, metav1.GetOptions{}) if err != nil { - return fmt.Sprintf("%v(<error: %v>)", sb, err) + return fmt.Sprintf("%v (<error: %v>)", sb, err) } spName := "" for i := range service.Spec.Ports { @@ -2649,7 +2652,7 @@ func (i *IngressDescriber) describeBackendV1(ns string, backend *networkingv1.In spName = sp.Name } } - ep := formatEndpoints(endpoints, sets.NewString(spName)) + ep := formatEndpointSlices(endpointSliceList.Items, sets.New(spName)) return fmt.Sprintf("%s (%s)", sb, ep) } if backend.Resource != nil { @@ -2867,17 +2870,17 @@ type ServiceCIDRDescriber struct { } func (c *ServiceCIDRDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { var events *corev1.EventList - svcV1alpha1, err := c.client.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), name, metav1.GetOptions{}) + svcV1beta1, err := c.client.NetworkingV1beta1().ServiceCIDRs().Get(context.TODO(), name, metav1.GetOptions{}) if err == nil { if describerSettings.ShowEvents { - events, _ = searchEvents(c.client.CoreV1(), svcV1alpha1, describerSettings.ChunkSize) + events, _ = searchEvents(c.client.CoreV1(), svcV1beta1, describerSettings.ChunkSize) } - return c.describeServiceCIDRV1alpha1(svcV1alpha1, events) + return c.describeServiceCIDRV1beta1(svcV1beta1, events) } return "", err } -func (c *ServiceCIDRDescriber) describeServiceCIDRV1alpha1(svc *networkingv1alpha1.ServiceCIDR, events *corev1.EventList) (string, error) { +func (c *ServiceCIDRDescriber) describeServiceCIDRV1beta1(svc *networkingv1beta1.ServiceCIDR, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%v\n", svc.Name) @@ -2916,17 +2919,17 @@ type IPAddressDescriber struct { } func (c *IPAddressDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { var events *corev1.EventList - ipV1alpha1, err := 
c.client.NetworkingV1alpha1().IPAddresses().Get(context.TODO(), name, metav1.GetOptions{}) + ipV1beta1, err := c.client.NetworkingV1beta1().IPAddresses().Get(context.TODO(), name, metav1.GetOptions{}) if err == nil { if describerSettings.ShowEvents { - events, _ = searchEvents(c.client.CoreV1(), ipV1alpha1, describerSettings.ChunkSize) + events, _ = searchEvents(c.client.CoreV1(), ipV1beta1, describerSettings.ChunkSize) } - return c.describeIPAddressV1alpha1(ipV1alpha1, events) + return c.describeIPAddressV1beta1(ipV1beta1, events) } return "", err } -func (c *IPAddressDescriber) describeIPAddressV1alpha1(ip *networkingv1alpha1.IPAddress, events *corev1.EventList) (string, error) { +func (c *IPAddressDescriber) describeIPAddressV1beta1(ip *networkingv1beta1.IPAddress, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%v\n", ip.Name) @@ -2961,12 +2964,14 @@ func (d *ServiceDescriber) Describe(namespace, name string, describerSettings De return "", err } - endpoints, _ := d.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + endpointSliceList, _ := d.DiscoveryV1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, name), + }) var events *corev1.EventList if describerSettings.ShowEvents { events, _ = searchEvents(d.CoreV1(), service, describerSettings.ChunkSize) } - return describeService(service, endpoints, events) + return describeService(service, endpointSliceList.Items, events) } func buildIngressString(ingress []corev1.LoadBalancerIngress) string { @@ -2978,6 +2983,9 @@ func buildIngressString(ingress []corev1.LoadBalancerIngress) string { } if ingress[i].IP != "" { buffer.WriteString(ingress[i].IP) + if ingress[i].IPMode != nil { + buffer.WriteString(fmt.Sprintf(" (%s)", *ingress[i].IPMode)) + } } else { buffer.WriteString(ingress[i].Hostname) } @@ -2985,10 +2993,7 @@ func buildIngressString(ingress []corev1.LoadBalancerIngress) string { return buffer.String() } -func describeService(service *corev1.Service, endpoints *corev1.Endpoints, events *corev1.EventList) (string, error) { - if endpoints == nil { - endpoints = &corev1.Endpoints{} - } +func describeService(service *corev1.Service, endpointSlices []discoveryv1.EndpointSlice, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", service.Name) @@ -3024,7 +3029,7 @@ func describeService(service *corev1.Service, endpoints *corev1.Endpoints, event w.Write(LEVEL_0, "External IPs:\t%v\n", strings.Join(service.Spec.ExternalIPs, ",")) } if service.Spec.LoadBalancerIP != "" { - w.Write(LEVEL_0, "IP:\t%s\n", service.Spec.LoadBalancerIP) + w.Write(LEVEL_0, "Desired LoadBalancer IP:\t%s\n", service.Spec.LoadBalancerIP) } if service.Spec.ExternalName != "" { w.Write(LEVEL_0, "External Name:\t%s\n", service.Spec.ExternalName) @@ -3049,12 +3054,15 @@ func describeService(service *corev1.Service, endpoints *corev1.Endpoints, event if sp.NodePort != 0 { w.Write(LEVEL_0, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol) } - w.Write(LEVEL_0, "Endpoints:\t%s\n", formatEndpoints(endpoints, sets.NewString(sp.Name))) + w.Write(LEVEL_0, "Endpoints:\t%s\n", formatEndpointSlices(endpointSlices, sets.New(sp.Name))) } w.Write(LEVEL_0, "Session Affinity:\t%s\n", service.Spec.SessionAffinity) if service.Spec.ExternalTrafficPolicy != "" { w.Write(LEVEL_0, 
"External Traffic Policy:\t%s\n", service.Spec.ExternalTrafficPolicy) } + if service.Spec.InternalTrafficPolicy != nil { + w.Write(LEVEL_0, "Internal Traffic Policy:\t%s\n", *service.Spec.InternalTrafficPolicy) + } if service.Spec.HealthCheckNodePort != 0 { w.Write(LEVEL_0, "HealthCheck NodePort:\t%d\n", service.Spec.HealthCheckNodePort) } @@ -4471,6 +4479,7 @@ func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings for k, v := range configMap.Data { w.Write(LEVEL_0, "%s:\n----\n", k) w.Write(LEVEL_0, "%s\n", string(v)) + w.Write(LEVEL_0, "\n") } w.Write(LEVEL_0, "\nBinaryData\n====\n") for k, v := range configMap.BinaryData { @@ -4576,7 +4585,11 @@ func printNetworkPolicySpecIngressFrom(npirs []networkingv1.NetworkPolicyIngress } else { proto = corev1.ProtocolTCP } - w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) + if port.EndPort == nil { + w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) + } else { + w.Write(LEVEL_0, "%s%s: %s-%d/%s\n", initialIndent, "To Port Range", port.Port, *port.EndPort, proto) + } } } if len(npir.From) == 0 { @@ -4620,7 +4633,11 @@ func printNetworkPolicySpecEgressTo(npers []networkingv1.NetworkPolicyEgressRule } else { proto = corev1.ProtocolTCP } - w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) + if port.EndPort == nil { + w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) + } else { + w.Write(LEVEL_0, "%s%s: %s-%d/%s\n", initialIndent, "To Port Range", port.Port, *port.EndPort, proto) + } } } if len(nper.To) == 0 { @@ -4705,7 +4722,7 @@ type VolumeAttributesClassDescriber struct { } func (d *VolumeAttributesClassDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - vac, err := d.StorageV1alpha1().VolumeAttributesClasses().Get(context.TODO(), name, metav1.GetOptions{}) + vac, err := d.StorageV1beta1().VolumeAttributesClasses().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -4718,7 +4735,7 @@ func (d *VolumeAttributesClassDescriber) Describe(namespace, name string, descri return describeVolumeAttributesClass(vac, events) } -func describeVolumeAttributesClass(vac *storagev1alpha1.VolumeAttributesClass, events *corev1.EventList) (string, error) { +func describeVolumeAttributesClass(vac *storagev1beta1.VolumeAttributesClass, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", vac.Name) @@ -5410,39 +5427,38 @@ func translateTimestampSince(timestamp metav1.Time) string { } // Pass ports=nil for all ports. -func formatEndpoints(endpoints *corev1.Endpoints, ports sets.String) string { - if len(endpoints.Subsets) == 0 { +func formatEndpointSlices(endpointSlices []discoveryv1.EndpointSlice, ports sets.Set[string]) string { + if len(endpointSlices) == 0 { return "" } - list := []string{} + var list []string max := 3 more := false count := 0 - for i := range endpoints.Subsets { - ss := &endpoints.Subsets[i] - if len(ss.Ports) == 0 { + for i := range endpointSlices { + if len(endpointSlices[i].Ports) == 0 { // It's possible to have headless services with no ports. - for i := range ss.Addresses { + for j := range endpointSlices[i].Endpoints { if len(list) == max { more = true } if !more { - list = append(list, ss.Addresses[i].IP) + list = append(list, endpointSlices[i].Endpoints[j].Addresses[0]) } count++ } } else { // "Normal" services with ports defined. 
- for i := range ss.Ports { - port := &ss.Ports[i] - if ports == nil || ports.Has(port.Name) { - for i := range ss.Addresses { + for j := range endpointSlices[i].Ports { + port := endpointSlices[i].Ports[j] + if ports == nil || ports.Has(*port.Name) { + for k := range endpointSlices[i].Endpoints { if len(list) == max { more = true } - addr := &ss.Addresses[i] + addr := endpointSlices[i].Endpoints[k].Addresses[0] if !more { - hostPort := net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port))) + hostPort := net.JoinHostPort(addr, strconv.Itoa(int(*port.Port))) list = append(list, hostPort) } count++ diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go index 762d953f2c..30adb24ed6 100644 --- a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go @@ -36,15 +36,15 @@ import ( watchtools "k8s.io/client-go/tools/watch" ) -// GetFirstPod returns a pod matching the namespace and label selector -// and the number of all pods that match the label selector. -func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string, timeout time.Duration, sortBy func([]*corev1.Pod) sort.Interface) (*corev1.Pod, int, error) { +// GetPodList returns a PodList matching the namespace and label selector +func GetPodList(client coreclient.PodsGetter, namespace string, selector string, timeout time.Duration, sortBy func([]*corev1.Pod) sort.Interface) (*corev1.PodList, error) { options := metav1.ListOptions{LabelSelector: selector} podList, err := client.Pods(namespace).List(context.TODO(), options) if err != nil { - return nil, 0, err + return nil, err } + pods := []*corev1.Pod{} for i := range podList.Items { pod := podList.Items[i] @@ -52,14 +52,17 @@ func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string } if len(pods) > 0 { sort.Sort(sortBy(pods)) - return pods[0], len(podList.Items), nil + for i, pod := range pods { + podList.Items[i] = *pod + } + return podList, nil } // Watch until we observe a pod options.ResourceVersion = podList.ResourceVersion w, err := client.Pods(namespace).Watch(context.TODO(), options) if err != nil { - return nil, 0, err + return nil, err } defer w.Stop() @@ -71,13 +74,29 @@ func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string defer cancel() event, err := watchtools.UntilWithoutRetry(ctx, w, condition) if err != nil { - return nil, 0, err + return nil, err } - pod, ok := event.Object.(*corev1.Pod) + + po, ok := event.Object.(*corev1.Pod) if !ok { - return nil, 0, fmt.Errorf("%#v is not a pod event", event) + return nil, fmt.Errorf("%#v is not a pod event", event) + } else { + podList.Items = append(podList.Items, *po) + } + return podList, nil +} + +// GetFirstPod returns a pod matching the namespace and label selector +// and the number of all pods that match the label selector. 
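`GetFirstPod` is refactored on top of the new `GetPodList`, which the all-pods log plumbing (`AllPodLogsForObjectFn` below) uses to fan out to every matching pod. A sketch of caller-side usage, assuming the usual `podutils.ByLogging` ordering; the wrapper function is made up for the example:

```go
package example

import (
	"fmt"
	"sort"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubectl/pkg/polymorphichelpers"
	"k8s.io/kubectl/pkg/util/podutils"
)

func podsForSelector(c kubernetes.Interface, namespace, selector string) error {
	sortBy := func(pods []*corev1.Pod) sort.Interface { return podutils.ByLogging(pods) }

	// All pods behind the selector, sorted; this is what the all-pods path consumes.
	podList, err := polymorphichelpers.GetPodList(c.CoreV1(), namespace, selector, 20*time.Second, sortBy)
	if err != nil {
		return err
	}
	// The single "best" pod plus the total count, as before.
	pod, count, err := polymorphichelpers.GetFirstPod(c.CoreV1(), namespace, selector, 20*time.Second, sortBy)
	if err != nil {
		return err
	}
	fmt.Printf("%d pods matched the selector; listed %d, first is %s\n", count, len(podList.Items), pod.Name)
	return nil
}
```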
+func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string, timeout time.Duration, sortBy func([]*corev1.Pod) sort.Interface) (*corev1.Pod, int, error) { + + podList, err := GetPodList(client, namespace, selector, timeout, sortBy) + if err != nil { + return nil, 0, err } - return pod, 1, nil + + return &podList.Items[0], len(podList.Items), nil + } // SelectorsForObject returns the pod label selector for a given object diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/interface.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/interface.go index 89c4e9facf..ad8d1d6d58 100644 --- a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/interface.go +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/interface.go @@ -27,6 +27,12 @@ import ( "k8s.io/client-go/rest" ) +// AllPodLogsForObjectFunc is a function type that can tell you how to get logs for a runtime.object +type AllPodLogsForObjectFunc func(restClientGetter genericclioptions.RESTClientGetter, object, options runtime.Object, timeout time.Duration, allContainers bool) (map[v1.ObjectReference]rest.ResponseWrapper, error) + +// AllPodLogsForObjectFn gives a way to easily override the function for unit testing if needed. +var AllPodLogsForObjectFn AllPodLogsForObjectFunc = allPodLogsForObject + // LogsForObjectFunc is a function type that can tell you how to get logs for a runtime.object type LogsForObjectFunc func(restClientGetter genericclioptions.RESTClientGetter, object, options runtime.Object, timeout time.Duration, allContainers bool) (map[v1.ObjectReference]rest.ResponseWrapper, error) diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go index c436c06d31..87968b789d 100644 --- a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go @@ -34,6 +34,19 @@ import ( "k8s.io/kubectl/pkg/util/podutils" ) +func allPodLogsForObject(restClientGetter genericclioptions.RESTClientGetter, object, options runtime.Object, timeout time.Duration, allContainers bool) (map[corev1.ObjectReference]rest.ResponseWrapper, error) { + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + + clientset, err := corev1client.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + return logsForObjectWithClient(clientset, object, options, timeout, allContainers, true) +} + func logsForObject(restClientGetter genericclioptions.RESTClientGetter, object, options runtime.Object, timeout time.Duration, allContainers bool) (map[corev1.ObjectReference]rest.ResponseWrapper, error) { clientConfig, err := restClientGetter.ToRESTConfig() if err != nil { @@ -44,11 +57,11 @@ func logsForObject(restClientGetter genericclioptions.RESTClientGetter, object, if err != nil { return nil, err } - return logsForObjectWithClient(clientset, object, options, timeout, allContainers) + return logsForObjectWithClient(clientset, object, options, timeout, allContainers, false) } // this is split for easy test-ability -func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, options runtime.Object, timeout time.Duration, allContainers bool) (map[corev1.ObjectReference]rest.ResponseWrapper, error) { +func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, options runtime.Object, timeout time.Duration, allContainers bool, allPods bool) (map[corev1.ObjectReference]rest.ResponseWrapper, error) { opts, ok := 
options.(*corev1.PodLogOptions) if !ok { return nil, errors.New("provided options object is not a PodLogOptions") @@ -58,7 +71,7 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt case *corev1.PodList: ret := make(map[corev1.ObjectReference]rest.ResponseWrapper) for i := range t.Items { - currRet, err := logsForObjectWithClient(clientset, &t.Items[i], options, timeout, allContainers) + currRet, err := logsForObjectWithClient(clientset, &t.Items[i], options, timeout, allContainers, allPods) if err != nil { return nil, err } @@ -95,7 +108,9 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt // Default to the first container name(aligning behavior with `kubectl exec'). currOpts.Container = t.Spec.Containers[0].Name if len(t.Spec.Containers) > 1 || len(t.Spec.InitContainers) > 0 || len(t.Spec.EphemeralContainers) > 0 { - fmt.Fprintf(os.Stderr, "Defaulted container %q out of: %s\n", currOpts.Container, podcmd.AllContainerNames(t)) + if !allPods { + fmt.Fprintf(os.Stderr, "Defaulted container %q out of: %s\n", currOpts.Container, podcmd.AllContainerNames(t)) + } } } @@ -117,7 +132,7 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt for _, c := range t.Spec.InitContainers { currOpts := opts.DeepCopy() currOpts.Container = c.Name - currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false) + currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false, false) if err != nil { return nil, err } @@ -128,7 +143,7 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt for _, c := range t.Spec.Containers { currOpts := opts.DeepCopy() currOpts.Container = c.Name - currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false) + currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false, false) if err != nil { return nil, err } @@ -139,7 +154,7 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt for _, c := range t.Spec.EphemeralContainers { currOpts := opts.DeepCopy() currOpts.Container = c.Name - currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false) + currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false, false) if err != nil { return nil, err } @@ -161,9 +176,18 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt if err != nil { return nil, err } + var targetObj runtime.Object = pod + if numPods > 1 { - fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) + if allPods { + targetObj, err = GetPodList(clientset, namespace, selector.String(), timeout, sortBy) + if err != nil { + return nil, err + } + } else { + fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) + } } - return logsForObjectWithClient(clientset, pod, options, timeout, allContainers) + return logsForObjectWithClient(clientset, targetObj, options, timeout, allContainers, allPods) } diff --git a/vendor/k8s.io/kubectl/pkg/util/completion/completion.go b/vendor/k8s.io/kubectl/pkg/util/completion/completion.go index ef1ebbeaa1..f88156484f 100644 --- a/vendor/k8s.io/kubectl/pkg/util/completion/completion.go +++ b/vendor/k8s.io/kubectl/pkg/util/completion/completion.go @@ -94,6 +94,36 @@ func PodResourceNameCompletionFunc(f cmdutil.Factory) func(*cobra.Command, []str } } +// ResourceAndPortCompletionFunc Returns a completion function that completes, as a first argument: +// 1- resources 
that match the toComplete prefix +// 2- the ports of the specific resource. i.e: container ports for pod resources and port for services +func ResourceAndPortCompletionFunc(f cmdutil.Factory) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) { + return func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var comps []string + var resourceType string + directive := cobra.ShellCompDirectiveNoFileComp + if len(args) == 0 { + comps, directive = doPodResourceCompletion(f, toComplete) + } else if len(args) == 1 { + t := strings.Split(args[0], "/") + // if we specify directly the pod, then resource type is pod, otherwise it would be the resource type passed + // by the user + if len(t) == 1 { + resourceType = "pod" + } else { + resourceType = t[0] + } + if resourceType == "service" || resourceType == "services" || resourceType == "svc" { + comps = CompGetServicePorts(f, t[1], toComplete) + } else { + podName := convertResourceNameToPodName(f, args[0]) + comps = CompGetPodContainerPorts(f, podName, toComplete) + } + } + return comps, directive + } +} + // PodResourceNameAndContainerCompletionFunc Returns a completion function that completes, as a first argument: // 1- pod names that match the toComplete prefix // 2- resource types containing pods which match the toComplete prefix @@ -167,6 +197,30 @@ func CompGetContainers(f cmdutil.Factory, podName string, toComplete string) []s return CompGetFromTemplate(&template, f, "", []string{"pod", podName}, toComplete) } +// CompGetPodContainerPorts retrieves the list of ports for containers within the specified pod that start with `toComplete`. +func CompGetPodContainerPorts(f cmdutil.Factory, podName string, toComplete string) []string { + var template string + exposedPort := strings.Split(toComplete, ":")[0] + + if toComplete == "" { + exposedPort = "{{ .containerPort }}" + } + template = fmt.Sprintf("{{ range .spec.containers }}{{ range .ports }}%s:{{ .containerPort }} {{ end }}{{ end }}", exposedPort) + return CompGetFromTemplate(&template, f, "", []string{"pod", podName}, toComplete) +} + +// CompGetServicePorts gets the list of ports of the specified service which begin with `toComplete`. +func CompGetServicePorts(f cmdutil.Factory, serviceName string, toComplete string) []string { + var template string + exposedPort := strings.Split(toComplete, ":")[0] + + if toComplete == "" { + exposedPort = "{{ .port }}" + } + template = fmt.Sprintf("{{ range .spec.ports }}%s:{{ .port }} {{ end }}", exposedPort) + return CompGetFromTemplate(&template, f, "", []string{"service", serviceName}, toComplete) +} + // CompGetFromTemplate executes a Get operation using the specified template and args and returns the results // which begin with `toComplete`. 
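`ResourceAndPortCompletionFunc` keys off the `TYPE/NAME` shape of the first argument to decide between service-port and container-port completion. A stripped-down sketch of that structure as a plain cobra `ValidArgsFunction`, with the template-driven lookups stubbed out (everything below is illustrative, not the kubectl implementation):

```go
package example

import (
	"strings"

	"github.com/spf13/cobra"
)

// completePorts stands in for CompGetServicePorts / CompGetPodContainerPorts.
// The real helpers run a templated get through the factory; return canned values here.
func completePorts(kind, name, toComplete string) []string {
	return []string{"8080:80", "8443:443"}
}

func newPortForwardStyleCommand() *cobra.Command {
	cmd := &cobra.Command{Use: "port-forward TYPE/NAME [LOCAL_PORT:]REMOTE_PORT"}
	cmd.ValidArgsFunction = func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		if len(args) != 1 {
			return nil, cobra.ShellCompDirectiveNoFileComp
		}
		// Bare names default to pods; "service/foo" completes service ports instead.
		kind, name := "pod", args[0]
		if parts := strings.SplitN(args[0], "/", 2); len(parts) == 2 {
			kind, name = parts[0], parts[1]
		}
		return completePorts(kind, name, toComplete), cobra.ShellCompDirectiveNoFileComp
	}
	return cmd
}
```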
func CompGetFromTemplate(template *string, f cmdutil.Factory, namespace string, args []string, toComplete string) []string { diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po index 7036ad59aa..f7280b4fe3 100644 --- a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po @@ -432,7 +432,7 @@ msgstr "Create a service account with the specified name" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38 #: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 msgid "Delete the specified cluster from the kubeconfig" -msgstr "指定したコンテキストをkubeconfigから削除する" +msgstr "指定したクラスターをkubeconfigから削除する" # https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38 #: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 diff --git a/vendor/k8s.io/kubectl/pkg/util/pod_port.go b/vendor/k8s.io/kubectl/pkg/util/pod_port.go index 6d78501a89..bcd2c72818 100644 --- a/vendor/k8s.io/kubectl/pkg/util/pod_port.go +++ b/vendor/k8s.io/kubectl/pkg/util/pod_port.go @@ -31,6 +31,12 @@ func LookupContainerPortNumberByName(pod v1.Pod, name string) (int32, error) { } } } - + for _, ctr := range pod.Spec.InitContainers { + for _, ctrportspec := range ctr.Ports { + if ctrportspec.Name == name { + return ctrportspec.ContainerPort, nil + } + } + } return int32(-1), fmt.Errorf("Pod '%s' does not have a named port '%s'", pod.Name, name) } diff --git a/vendor/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go b/vendor/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go index 54355503f6..b63a17ace5 100644 --- a/vendor/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go +++ b/vendor/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go @@ -99,6 +99,9 @@ const ( SystemContainerMisc = "misc" // SystemContainerPods is the container name for the system container tracking user pods. SystemContainerPods = "pods" + // SystemContainerWindowsGlobalCommitMemory (only used on Windows) is the container name for the system container + // tracking global commit memory usage and is used for memory-pressure eviction. + SystemContainerWindowsGlobalCommitMemory = "windows-global-commit-memory" ) // ProcessStats are stats pertaining to processes. diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto index 8772b46535..be308d49ad 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto @@ -36,7 +36,7 @@ message ContainerMetrics { optional string name = 1; // The memory usage is the memory working set. - map usage = 2; + map usage = 2; } // NodeMetrics sets resource usage metrics of a node. @@ -44,23 +44,23 @@ message NodeMetrics { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The following fields define time interval from which metrics were // collected from the interval [Timestamp-Window, Timestamp]. 
- optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; // The memory usage is the memory working set. - map usage = 4; + map usage = 4; } // NodeMetricsList is a list of NodeMetrics. message NodeMetricsList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of node metrics. repeated NodeMetrics items = 2; @@ -71,13 +71,13 @@ message PodMetrics { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The following fields define time interval from which metrics were // collected from the interval [Timestamp-Window, Timestamp]. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; // Metrics for all containers are collected within the same time window. // +listType=atomic @@ -88,7 +88,7 @@ message PodMetrics { message PodMetricsList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pod metrics. repeated PodMetrics items = 2; diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto index aa739d2121..602907938a 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto @@ -36,7 +36,7 @@ message ContainerMetrics { optional string name = 1; // The memory usage is the memory working set. - map usage = 2; + map usage = 2; } // NodeMetrics sets resource usage metrics of a node. @@ -44,23 +44,23 @@ message NodeMetrics { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The following fields define time interval from which metrics were // collected from the interval [Timestamp-Window, Timestamp]. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; // The memory usage is the memory working set. - map usage = 4; + map usage = 4; } // NodeMetricsList is a list of NodeMetrics. message NodeMetricsList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of node metrics. repeated NodeMetrics items = 2; @@ -71,13 +71,13 @@ message PodMetrics { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The following fields define time interval from which metrics were // collected from the interval [Timestamp-Window, Timestamp]. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; // Metrics for all containers are collected within the same time window. // +listType=atomic @@ -88,7 +88,7 @@ message PodMetrics { message PodMetricsList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pod metrics. repeated PodMetrics items = 2; diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go index a312221ed2..7a95775a7b 100644 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go @@ -20,11 +20,10 @@ package v1beta1 import ( "context" - "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" ) @@ -45,54 +44,18 @@ type NodeMetricsInterface interface { // nodeMetricses implements NodeMetricsInterface type nodeMetricses struct { - client rest.Interface + *gentype.ClientWithList[*v1beta1.NodeMetrics, *v1beta1.NodeMetricsList] } // newNodeMetricses returns a NodeMetricses func newNodeMetricses(c *MetricsV1beta1Client) *nodeMetricses { return &nodeMetricses{ - client: c.RESTClient(), + gentype.NewClientWithList[*v1beta1.NodeMetrics, *v1beta1.NodeMetricsList]( + "nodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.NodeMetrics { return &v1beta1.NodeMetrics{} }, + func() *v1beta1.NodeMetricsList { return &v1beta1.NodeMetricsList{} }), } } - -// Get takes name of the nodeMetrics, and returns the corresponding nodeMetrics object, and an error if there is any. -func (c *nodeMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NodeMetrics, err error) { - result = &v1beta1.NodeMetrics{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NodeMetricses that match those selectors. 
-func (c *nodeMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NodeMetricsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.NodeMetricsList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodeMetricses. -func (c *nodeMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go index e66c377c25..b4724af954 100644 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go @@ -20,11 +20,10 @@ package v1beta1 import ( "context" - "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" ) @@ -45,59 +44,18 @@ type PodMetricsInterface interface { // podMetricses implements PodMetricsInterface type podMetricses struct { - client rest.Interface - ns string + *gentype.ClientWithList[*v1beta1.PodMetrics, *v1beta1.PodMetricsList] } // newPodMetricses returns a PodMetricses func newPodMetricses(c *MetricsV1beta1Client, namespace string) *podMetricses { return &podMetricses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*v1beta1.PodMetrics, *v1beta1.PodMetricsList]( + "pods", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.PodMetrics { return &v1beta1.PodMetrics{} }, + func() *v1beta1.PodMetricsList { return &v1beta1.PodMetricsList{} }), } } - -// Get takes name of the podMetrics, and returns the corresponding podMetrics object, and an error if there is any. -func (c *podMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodMetrics, err error) { - result = &v1beta1.PodMetrics{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodMetricses that match those selectors. -func (c *podMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodMetricsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodMetricsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podMetricses. 
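The regenerated metrics client swaps the hand-written Get/List/Watch bodies for the generic `gentype.ClientWithList`, but the caller-facing interface is unchanged. A sketch of typical usage through the versioned clientset (config wiring omitted; names are illustrative):

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
)

// printPodCPU lists PodMetrics in a namespace and prints per-container CPU usage.
func printPodCPU(ctx context.Context, cfg *rest.Config, namespace string) error {
	mc, err := metricsclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	podMetrics, err := mc.MetricsV1beta1().PodMetricses(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, pm := range podMetrics.Items {
		for _, c := range pm.Containers {
			fmt.Printf("%s/%s cpu=%s\n", pm.Name, c.Name, c.Usage.Cpu())
		}
	}
	return nil
}
```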
-func (c *podMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} diff --git a/vendor/k8s.io/pod-security-admission/policy/check_procMount.go b/vendor/k8s.io/pod-security-admission/policy/check_procMount.go index 282dbb32ce..8ec585b9c9 100644 --- a/vendor/k8s.io/pod-security-admission/policy/check_procMount.go +++ b/vendor/k8s.io/pod-security-admission/policy/check_procMount.go @@ -35,6 +35,9 @@ spec.initContainers[*].securityContext.procMount **Allowed Values:** undefined/null, "Default" +However, if the pod is in a user namespace (`hostUsers: false`), and the +UserNamespacesPodSecurityStandards feature is enabled, all values are allowed. + */ func init() { @@ -58,6 +61,14 @@ func CheckProcMount() Check { } func procMount_1_0(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { + // TODO: When we remove the UserNamespacesPodSecurityStandards feature gate (and GA this relaxation), + // create a new policy version. + // Note: pod validation will check for well formed procMount type, so avoid double validation and allow everything + // here. + if relaxPolicyForUserNamespacePod(podSpec) { + return CheckResult{Allowed: true} + } + var badContainers []string forbiddenProcMountTypes := sets.NewString() visitContainers(podSpec, func(container *corev1.Container) { diff --git a/vendor/k8s.io/pod-security-admission/policy/check_seLinuxOptions.go b/vendor/k8s.io/pod-security-admission/policy/check_seLinuxOptions.go index ec832ac5c8..b4263d23b9 100644 --- a/vendor/k8s.io/pod-security-admission/policy/check_seLinuxOptions.go +++ b/vendor/k8s.io/pod-security-admission/policy/check_seLinuxOptions.go @@ -64,17 +64,30 @@ func CheckSELinuxOptions() Check { Versions: []VersionedCheck{ { MinimumVersion: api.MajorMinorVersion(1, 0), - CheckPod: seLinuxOptions_1_0, + CheckPod: seLinuxOptions1_0, + }, + { + MinimumVersion: api.MajorMinorVersion(1, 31), + CheckPod: seLinuxOptions1_31, }, }, } } var ( - selinux_allowed_types_1_0 = sets.NewString("", "container_t", "container_init_t", "container_kvm_t") + selinuxAllowedTypes1_0 = sets.New("", "container_t", "container_init_t", "container_kvm_t") + selinuxAllowedTypes1_31 = sets.New("", "container_t", "container_init_t", "container_kvm_t", "container_engine_t") ) -func seLinuxOptions_1_0(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { +func seLinuxOptions1_0(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { + return seLinuxOptions(podMetadata, podSpec, selinuxAllowedTypes1_0) +} + +func seLinuxOptions1_31(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { + return seLinuxOptions(podMetadata, podSpec, selinuxAllowedTypes1_31) +} + +func seLinuxOptions(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec, allowedTypes sets.Set[string]) CheckResult { var ( // sources that set bad seLinuxOptions badSetters []string @@ -89,7 +102,7 @@ func seLinuxOptions_1_0(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) validSELinuxOptions := func(opts *corev1.SELinuxOptions) bool { valid := true - if !selinux_allowed_types_1_0.Has(opts.Type) { + if !allowedTypes.Has(opts.Type) { valid = false badTypes.Insert(opts.Type) } diff --git 
a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go new file mode 100644 index 0000000000..7cb7795bec --- /dev/null +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -0,0 +1,195 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "context" + "fmt" + "net" + "sync" +) + +// connErrPair pairs conn and error which is returned by accept on sub-listeners. +type connErrPair struct { + conn net.Conn + err error +} + +// multiListener implements net.Listener +type multiListener struct { + listeners []net.Listener + wg sync.WaitGroup + + // connCh passes accepted connections, from child listeners to parent. + connCh chan connErrPair + // stopCh communicates from parent to child listeners. + stopCh chan struct{} +} + +// compile time check to ensure *multiListener implements net.Listener +var _ net.Listener = &multiListener{} + +// MultiListen returns net.Listener which can listen on and accept connections for +// the given network on multiple addresses. Internally it uses stdlib to create +// sub-listener and multiplexes connection requests using go-routines. +// The network must be "tcp", "tcp4" or "tcp6". +// It follows the semantics of net.Listen that primarily means: +// 1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen +// listens on all available unicast and anycast IP addresses of the local system. +// 2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively. +// 3. The host can accept names (e.g, localhost) and it will create a listener for at +// most one of the host's IP. +func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) { + var lc net.ListenConfig + return multiListen( + ctx, + network, + addrs, + func(ctx context.Context, network, address string) (net.Listener, error) { + return lc.Listen(ctx, network, address) + }) +} + +// multiListen implements MultiListen by consuming stdlib functions as dependency allowing +// mocking for unit-testing. +func multiListen( + ctx context.Context, + network string, + addrs []string, + listenFunc func(ctx context.Context, network, address string) (net.Listener, error), +) (net.Listener, error) { + if !(network == "tcp" || network == "tcp4" || network == "tcp6") { + return nil, fmt.Errorf("network %q not supported", network) + } + if len(addrs) == 0 { + return nil, fmt.Errorf("no address provided to listen on") + } + + ml := &multiListener{ + connCh: make(chan connErrPair), + stopCh: make(chan struct{}), + } + for _, addr := range addrs { + l, err := listenFunc(ctx, network, addr) + if err != nil { + // close all the sub-listeners and exit + _ = ml.Close() + return nil, err + } + ml.listeners = append(ml.listeners, l) + } + + for _, l := range ml.listeners { + ml.wg.Add(1) + go func(l net.Listener) { + defer ml.wg.Done() + for { + // Accept() is blocking, unless ml.Close() is called, in which + // case it will return immediately with an error. 
+ conn, err := l.Accept() + // This assumes that ANY error from Accept() will terminate the + // sub-listener. We could maybe be more precise, but it + // doesn't seem necessary. + terminate := err != nil + + select { + case ml.connCh <- connErrPair{conn: conn, err: err}: + case <-ml.stopCh: + // In case we accepted a connection AND were stopped, and + // this select-case was chosen, just throw away the + // connection. This avoids potentially blocking on connCh + // or leaking a connection. + if conn != nil { + _ = conn.Close() + } + terminate = true + } + // Make sure we don't loop on Accept() returning an error and + // the select choosing the channel case. + if terminate { + return + } + } + }(l) + } + return ml, nil +} + +// Accept implements net.Listener. It waits for and returns a connection from +// any of the sub-listener. +func (ml *multiListener) Accept() (net.Conn, error) { + // wait for any sub-listener to enqueue an accepted connection + connErr, ok := <-ml.connCh + if !ok { + // The channel will be closed only when Close() is called on the + // multiListener. Closing of this channel implies that all + // sub-listeners are also closed, which causes a "use of closed + // network connection" error on their Accept() calls. We return the + // same error for multiListener.Accept() if multiListener.Close() + // has already been called. + return nil, fmt.Errorf("use of closed network connection") + } + return connErr.conn, connErr.err +} + +// Close implements net.Listener. It will close all sub-listeners and wait for +// the go-routines to exit. +func (ml *multiListener) Close() error { + // Make sure this can be called repeatedly without explosions. + select { + case <-ml.stopCh: + return fmt.Errorf("use of closed network connection") + default: + } + + // Tell all sub-listeners to stop. + close(ml.stopCh) + + // Closing the listeners causes Accept() to immediately return an error in + // the sub-listener go-routines. + for _, l := range ml.listeners { + _ = l.Close() + } + + // Wait for all the sub-listener go-routines to exit. + ml.wg.Wait() + close(ml.connCh) + + // Drain any already-queued connections. + for connErr := range ml.connCh { + if connErr.conn != nil { + _ = connErr.conn.Close() + } + } + return nil +} + +// Addr is an implementation of the net.Listener interface. It always returns +// the address of the first listener. Callers should use conn.LocalAddr() to +// obtain the actual local address of the sub-listener. +func (ml *multiListener) Addr() net.Addr { + return ml.listeners[0].Addr() +} + +// Addrs is like Addr, but returns the address for all registered listeners. 
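A minimal usage sketch for MultiListen, assuming only the k8s.io/utils/net import path shown in this file; the netutils alias, addresses, and port are illustrative:

package main

import (
	"context"
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	// One listener accepting on both loopback addresses of the same port.
	ln, err := netutils.MultiListen(context.Background(), "tcp", "127.0.0.1:9090", "[::1]:9090")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	for {
		// Accept returns the next connection queued by any sub-listener.
		conn, err := ln.Accept()
		if err != nil {
			// After Close(), Accept reports "use of closed network connection".
			return
		}
		fmt.Println("accepted", conn.RemoteAddr())
		_ = conn.Close()
	}
}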
+func (ml *multiListener) Addrs() []net.Addr { + var ret []net.Addr + for _, l := range ml.listeners { + ret = append(ret, l.Addr()) + } + return ret +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b572699811..f1bfdc1a81 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -39,9 +39,9 @@ github.com/Sytten/logrus-zap-hook # github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d ## explicit github.com/acarl005/stripansi -# github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df -## explicit; go 1.18 -github.com/antlr/antlr4/runtime/Go/antlr/v4 +# github.com/antlr4-go/antlr/v4 v4.13.1 +## explicit; go 1.22 +github.com/antlr4-go/antlr/v4 # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator @@ -93,7 +93,7 @@ github.com/coreos/go-semver/semver ## explicit; go 1.12 github.com/coreos/go-systemd/v22/daemon github.com/coreos/go-systemd/v22/journal -# github.com/cpuguy83/go-md2man/v2 v2.0.3 +# github.com/cpuguy83/go-md2man/v2 v2.0.4 ## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man # github.com/creack/pty v1.1.21 @@ -120,7 +120,7 @@ github.com/docker/docker-credential-helpers/credentials # github.com/dprotaso/go-yit v0.0.0-20191028211022-135eb7262960 ## explicit; go 1.13 github.com/dprotaso/go-yit -# github.com/emicklei/go-restful/v3 v3.11.2 +# github.com/emicklei/go-restful/v3 v3.12.1 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log @@ -148,9 +148,9 @@ github.com/felixge/httpsnoop # github.com/fsnotify/fsnotify v1.7.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify -# github.com/fvbommel/sortorder v1.1.0 -## explicit; go 1.13 -github.com/fvbommel/sortorder +# github.com/fxamacker/cbor/v2 v2.7.0 +## explicit; go 1.17 +github.com/fxamacker/cbor/v2 # github.com/ghodss/yaml v1.0.0 ## explicit github.com/ghodss/yaml @@ -176,11 +176,11 @@ github.com/go-openapi/analysis/internal/flatten/sortref # github.com/go-openapi/errors v0.20.2 ## explicit; go 1.14 github.com/go-openapi/errors -# github.com/go-openapi/jsonpointer v0.20.2 -## explicit; go 1.19 +# github.com/go-openapi/jsonpointer v0.21.0 +## explicit; go 1.20 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.20.4 -## explicit; go 1.19 +# github.com/go-openapi/jsonreference v0.21.0 +## explicit; go 1.20 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal # github.com/go-openapi/loads v0.21.2 @@ -192,8 +192,8 @@ github.com/go-openapi/spec # github.com/go-openapi/strfmt v0.21.3 ## explicit; go 1.13 github.com/go-openapi/strfmt -# github.com/go-openapi/swag v0.22.7 -## explicit; go 1.19 +# github.com/go-openapi/swag v0.23.0 +## explicit; go 1.20 github.com/go-openapi/swag # github.com/go-task/slim-sprig/v3 v3.0.0 ## explicit; go 1.20 @@ -214,7 +214,7 @@ github.com/golang/protobuf/ptypes/empty # github.com/google/btree v1.1.2 ## explicit; go 1.18 github.com/google/btree -# github.com/google/cel-go v0.17.8 +# github.com/google/cel-go v0.21.0 ## explicit; go 1.18 github.com/google/cel-go/cel github.com/google/cel-go/checker @@ -265,7 +265,7 @@ github.com/google/go-querystring/query ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 +# github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 ## explicit; go 1.19 github.com/google/pprof/profile # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 @@ -274,8 +274,8 @@ 
github.com/google/shlex # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/gorilla/websocket v1.5.1 -## explicit; go 1.20 +# github.com/gorilla/websocket v1.5.3 +## explicit; go 1.12 github.com/gorilla/websocket # github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 ## explicit @@ -283,8 +283,8 @@ github.com/gregjones/httpcache # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 -## explicit; go 1.19 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 +## explicit; go 1.21 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -334,9 +334,21 @@ github.com/k0kubun/go-ansi # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 ## explicit github.com/kballard/go-shellquote +# github.com/klauspost/compress v1.17.10 +## explicit; go 1.21 +github.com/klauspost/compress +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 ## explicit; go 1.15 github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1 +# github.com/kylelemons/godebug v1.1.0 +## explicit; go 1.11 +github.com/kylelemons/godebug/diff # github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de ## explicit github.com/liggitt/tabwriter @@ -420,7 +432,7 @@ github.com/mitchellh/go-wordwrap # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure -# github.com/moby/spdystream v0.2.0 +# github.com/moby/spdystream v0.5.0 ## explicit; go 1.13 github.com/moby/spdystream github.com/moby/spdystream/spdy @@ -454,7 +466,7 @@ github.com/oklog/ulid github.com/olekukonko/tablewriter # github.com/onsi/ginkgo v1.16.5 ## explicit; go 1.16 -# github.com/onsi/ginkgo/v2 v2.17.2 +# github.com/onsi/ginkgo/v2 v2.19.0 ## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -497,8 +509,10 @@ github.com/peterbourgon/diskv # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.18.0 -## explicit; go 1.19 +# github.com/prometheus/client_golang v1.20.4 +## explicit; go 1.20 +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal @@ -509,10 +523,9 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.46.0 -## explicit; go 1.20 +# github.com/prometheus/common v0.60.0 +## explicit; go 1.21 github.com/prometheus/common/expfmt -github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model # github.com/prometheus/procfs v0.15.1 ## explicit; go 1.20 @@ -539,7 +552,7 @@ github.com/sirupsen/logrus # github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 ## explicit github.com/skratchdot/open-golang/open -# 
github.com/spf13/cobra v1.8.0 +# github.com/spf13/cobra v1.8.1 ## explicit; go 1.15 github.com/spf13/cobra github.com/spf13/cobra/doc @@ -564,27 +577,30 @@ github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath # github.com/wk8/go-ordered-map/v2 v2.1.8 ## explicit; go 1.18 github.com/wk8/go-ordered-map/v2 +# github.com/x448/float16 v0.8.4 +## explicit; go 1.11 +github.com/x448/float16 # github.com/xlab/treeprint v1.2.0 ## explicit; go 1.13 github.com/xlab/treeprint -# go.etcd.io/etcd/api/v3 v3.5.14 -## explicit; go 1.21 +# go.etcd.io/etcd/api/v3 v3.5.16 +## explicit; go 1.22 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb go.etcd.io/etcd/api/v3/membershippb go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version -# go.etcd.io/etcd/client/pkg/v3 v3.5.14 -## explicit; go 1.21 +# go.etcd.io/etcd/client/pkg/v3 v3.5.16 +## explicit; go 1.22 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.14 -## explicit; go 1.21 +# go.etcd.io/etcd/client/v3 v3.5.16 +## explicit; go 1.22 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint @@ -598,17 +614,18 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 -## explicit; go 1.20 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 +## explicit; go 1.22 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 -## explicit; go 1.21 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 +## explicit; go 1.22 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.27.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel v1.30.0 +## explicit; go 1.22 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -622,37 +639,37 @@ go.opentelemetry.io/otel/semconv/internal go.opentelemetry.io/otel/semconv/v1.12.0 go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 -go.opentelemetry.io/otel/semconv/v1.25.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 -## explicit; go 1.20 +go.opentelemetry.io/otel/semconv/v1.26.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 +## explicit; go 1.22 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 -## explicit; go 1.20 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 +## explicit; go 1.22 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.27.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/metric v1.30.0 +## explicit; go 1.22 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.27.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/sdk v1.30.0 +## explicit; go 1.22 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation -go.opentelemetry.io/otel/sdk/internal go.opentelemetry.io/otel/sdk/internal/env +go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.27.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/trace v1.30.0 +## explicit; go 1.22 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.1.0 +# go.opentelemetry.io/proto/otlp v1.3.1 ## explicit; go 1.17 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 @@ -684,8 +701,8 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.24.0 -## explicit; go 1.18 +# golang.org/x/crypto v0.28.0 +## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b golang.org/x/crypto/cast5 @@ -703,15 +720,15 @@ golang.org/x/crypto/openpgp/packet golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/sha3 -# golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 -## explicit; go 1.20 +# golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6 +## explicit; go 1.22.0 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.18.0 -## explicit; go 1.18 +# golang.org/x/mod v0.21.0 +## explicit; go 1.22.0 golang.org/x/mod/semver -# golang.org/x/net v0.26.0 +# golang.org/x/net v0.30.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -726,25 +743,25 @@ golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.21.0 +# golang.org/x/oauth2 v0.23.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.7.0 +# golang.org/x/sync v0.8.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.21.0 +# golang.org/x/sys v0.26.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.21.0 +# golang.org/x/term v0.25.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.16.0 +# golang.org/x/text v0.19.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/encoding @@ -776,34 +793,35 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.5.0 +# golang.org/x/time v0.7.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.22.0 -## explicit; go 1.19 +# golang.org/x/tools v0.26.0 +## explicit; go 1.22.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 
gomodules.xyz/jsonpatch/v2 -# google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f +## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f +## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.64.0 -## explicit; go 1.19 +# google.golang.org/grpc v1.67.1 +## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -814,7 +832,9 @@ google.golang.org/grpc/credentials/insecure google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto +google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog +google.golang.org/grpc/grpclog/internal google.golang.org/grpc/health google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal @@ -827,7 +847,6 @@ google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/credentials google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/grpclog -google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/idle @@ -839,11 +858,13 @@ google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig +google.golang.org/grpc/internal/stats google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/keepalive +google.golang.org/grpc/mem google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/reflection @@ -857,8 +878,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.34.1 -## explicit; go 1.17 +# google.golang.org/protobuf v1.34.2 +## explicit; go 1.20 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -901,6 +922,9 @@ google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb +# gopkg.in/evanphx/json-patch.v4 v4.12.0 +## explicit +gopkg.in/evanphx/json-patch.v4 # gopkg.in/inf.v0 v0.9.1 ## explicit gopkg.in/inf.v0 @@ -928,7 +952,7 @@ gotest.tools/v3/internal/assert gotest.tools/v3/internal/difflib gotest.tools/v3/internal/format gotest.tools/v3/internal/source -# k8s.io/api v0.30.2 +# k8s.io/api v0.31.1 ## explicit; go 1.22.0 
k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -956,6 +980,7 @@ k8s.io/api/certificates/v1 k8s.io/api/certificates/v1alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 +k8s.io/api/coordination/v1alpha1 k8s.io/api/coordination/v1beta1 k8s.io/api/core/v1 k8s.io/api/discovery/v1 @@ -979,7 +1004,7 @@ k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 -k8s.io/api/resource/v1alpha2 +k8s.io/api/resource/v1alpha3 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 @@ -987,7 +1012,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.30.2 +# k8s.io/apiextensions-apiserver v0.31.1 ## explicit; go 1.22.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -998,12 +1023,13 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.30.2 +# k8s.io/apimachinery v0.31.1 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/meta/table +k8s.io/apimachinery/pkg/api/meta/testrestmapper k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/api/validation k8s.io/apimachinery/pkg/api/validation/path @@ -1023,6 +1049,8 @@ k8s.io/apimachinery/pkg/labels k8s.io/apimachinery/pkg/runtime k8s.io/apimachinery/pkg/runtime/schema k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct +k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/runtime/serializer/protobuf k8s.io/apimachinery/pkg/runtime/serializer/recognizer @@ -1066,7 +1094,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.30.2 +# k8s.io/apiserver v0.31.1 ## explicit; go 1.22.0 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -1172,6 +1200,7 @@ k8s.io/apiserver/pkg/server/options/encryptionconfig/controller k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics k8s.io/apiserver/pkg/server/resourceconfig k8s.io/apiserver/pkg/server/routes +k8s.io/apiserver/pkg/server/routine k8s.io/apiserver/pkg/server/storage k8s.io/apiserver/pkg/storage k8s.io/apiserver/pkg/storage/cacher @@ -1179,6 +1208,7 @@ k8s.io/apiserver/pkg/storage/cacher/metrics k8s.io/apiserver/pkg/storage/errors k8s.io/apiserver/pkg/storage/etcd3 k8s.io/apiserver/pkg/storage/etcd3/metrics +k8s.io/apiserver/pkg/storage/feature k8s.io/apiserver/pkg/storage/names k8s.io/apiserver/pkg/storage/storagebackend k8s.io/apiserver/pkg/storage/storagebackend/factory @@ -1208,6 +1238,7 @@ k8s.io/apiserver/pkg/util/peerproxy/metrics k8s.io/apiserver/pkg/util/proxy k8s.io/apiserver/pkg/util/proxy/metrics k8s.io/apiserver/pkg/util/shufflesharding +k8s.io/apiserver/pkg/util/version k8s.io/apiserver/pkg/util/webhook k8s.io/apiserver/pkg/util/x509metrics k8s.io/apiserver/pkg/warning @@ -1218,14 +1249,15 @@ k8s.io/apiserver/plugin/pkg/audit/webhook 
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics -# k8s.io/cli-runtime v0.30.2 +# k8s.io/cli-runtime v0.31.1 ## explicit; go 1.22.0 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.30.2 +# k8s.io/client-go v0.31.1 ## explicit; go 1.22.0 +k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -1243,6 +1275,7 @@ k8s.io/client-go/applyconfigurations/certificates/v1 k8s.io/client-go/applyconfigurations/certificates/v1alpha1 k8s.io/client-go/applyconfigurations/certificates/v1beta1 k8s.io/client-go/applyconfigurations/coordination/v1 +k8s.io/client-go/applyconfigurations/coordination/v1alpha1 k8s.io/client-go/applyconfigurations/coordination/v1beta1 k8s.io/client-go/applyconfigurations/core/v1 k8s.io/client-go/applyconfigurations/discovery/v1 @@ -1254,6 +1287,7 @@ k8s.io/client-go/applyconfigurations/flowcontrol/v1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3 +k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1 k8s.io/client-go/applyconfigurations/internal k8s.io/client-go/applyconfigurations/meta/v1 k8s.io/client-go/applyconfigurations/networking/v1 @@ -1267,7 +1301,7 @@ k8s.io/client-go/applyconfigurations/policy/v1beta1 k8s.io/client-go/applyconfigurations/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 -k8s.io/client-go/applyconfigurations/resource/v1alpha2 +k8s.io/client-go/applyconfigurations/resource/v1alpha3 k8s.io/client-go/applyconfigurations/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 @@ -1284,6 +1318,7 @@ k8s.io/client-go/dynamic/dynamicinformer k8s.io/client-go/dynamic/dynamiclister k8s.io/client-go/dynamic/fake k8s.io/client-go/features +k8s.io/client-go/gentype k8s.io/client-go/informers k8s.io/client-go/informers/admissionregistration k8s.io/client-go/informers/admissionregistration/v1 @@ -1309,6 +1344,7 @@ k8s.io/client-go/informers/certificates/v1alpha1 k8s.io/client-go/informers/certificates/v1beta1 k8s.io/client-go/informers/coordination k8s.io/client-go/informers/coordination/v1 +k8s.io/client-go/informers/coordination/v1alpha1 k8s.io/client-go/informers/coordination/v1beta1 k8s.io/client-go/informers/core k8s.io/client-go/informers/core/v1 @@ -1342,7 +1378,7 @@ k8s.io/client-go/informers/rbac/v1 k8s.io/client-go/informers/rbac/v1alpha1 k8s.io/client-go/informers/rbac/v1beta1 k8s.io/client-go/informers/resource -k8s.io/client-go/informers/resource/v1alpha2 +k8s.io/client-go/informers/resource/v1alpha3 k8s.io/client-go/informers/scheduling k8s.io/client-go/informers/scheduling/v1 k8s.io/client-go/informers/scheduling/v1alpha1 @@ -1400,6 +1436,8 @@ k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1/fake +k8s.io/client-go/kubernetes/typed/coordination/v1alpha1 +k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake 
k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake k8s.io/client-go/kubernetes/typed/core/v1 @@ -1444,8 +1482,8 @@ k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake k8s.io/client-go/kubernetes/typed/rbac/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake -k8s.io/client-go/kubernetes/typed/resource/v1alpha2 -k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake +k8s.io/client-go/kubernetes/typed/resource/v1alpha3 +k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 @@ -1460,6 +1498,7 @@ k8s.io/client-go/kubernetes/typed/storage/v1beta1 k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1 k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake +k8s.io/client-go/listers k8s.io/client-go/listers/admissionregistration/v1 k8s.io/client-go/listers/admissionregistration/v1alpha1 k8s.io/client-go/listers/admissionregistration/v1beta1 @@ -1477,6 +1516,7 @@ k8s.io/client-go/listers/certificates/v1 k8s.io/client-go/listers/certificates/v1alpha1 k8s.io/client-go/listers/certificates/v1beta1 k8s.io/client-go/listers/coordination/v1 +k8s.io/client-go/listers/coordination/v1alpha1 k8s.io/client-go/listers/coordination/v1beta1 k8s.io/client-go/listers/core/v1 k8s.io/client-go/listers/discovery/v1 @@ -1499,7 +1539,7 @@ k8s.io/client-go/listers/policy/v1beta1 k8s.io/client-go/listers/rbac/v1 k8s.io/client-go/listers/rbac/v1alpha1 k8s.io/client-go/listers/rbac/v1beta1 -k8s.io/client-go/listers/resource/v1alpha2 +k8s.io/client-go/listers/resource/v1alpha3 k8s.io/client-go/listers/scheduling/v1 k8s.io/client-go/listers/scheduling/v1alpha1 k8s.io/client-go/listers/scheduling/v1beta1 @@ -1560,6 +1600,7 @@ k8s.io/client-go/transport/websocket k8s.io/client-go/util/cert k8s.io/client-go/util/certificate/csr k8s.io/client-go/util/connrotation +k8s.io/client-go/util/consistencydetector k8s.io/client-go/util/csaupgrade k8s.io/client-go/util/exec k8s.io/client-go/util/flowcontrol @@ -1567,8 +1608,9 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry +k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.30.2 +# k8s.io/component-base v0.31.1 ## explicit; go 1.22.0 k8s.io/component-base/cli/flag k8s.io/component-base/featuregate @@ -1587,10 +1629,10 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version -# k8s.io/component-helpers v0.30.2 +# k8s.io/component-helpers v0.31.1 ## explicit; go 1.22.0 k8s.io/component-helpers/storage/ephemeral -# k8s.io/klog/v2 v2.120.1 +# k8s.io/klog/v2 v2.130.1 ## explicit; go 1.18 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -1601,13 +1643,13 @@ k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler k8s.io/klog/v2/internal/verbosity k8s.io/klog/v2/textlogger -# k8s.io/kms v0.30.2 +# k8s.io/kms v0.31.1 ## explicit; go 1.22.0 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-aggregator v0.30.2 +# k8s.io/kube-aggregator v0.31.1 ## explicit; go 1.22.0 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/install @@ -1633,11 +1675,13 @@ 
k8s.io/kube-aggregator/pkg/controllers/openapi k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator k8s.io/kube-aggregator/pkg/controllers/openapiv3 k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator -k8s.io/kube-aggregator/pkg/controllers/status +k8s.io/kube-aggregator/pkg/controllers/status/local +k8s.io/kube-aggregator/pkg/controllers/status/metrics +k8s.io/kube-aggregator/pkg/controllers/status/remote k8s.io/kube-aggregator/pkg/registry/apiservice k8s.io/kube-aggregator/pkg/registry/apiservice/etcd k8s.io/kube-aggregator/pkg/registry/apiservice/rest -# k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a +# k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 ## explicit; go 1.20 k8s.io/kube-openapi/pkg/aggregator k8s.io/kube-openapi/pkg/builder @@ -1661,7 +1705,7 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/kubectl v0.30.2 +# k8s.io/kubectl v0.31.1 ## explicit; go 1.22.0 k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd/apiresources @@ -1696,17 +1740,17 @@ k8s.io/kubectl/pkg/util/storage k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/kubelet v0.30.2 +# k8s.io/kubelet v0.31.1 ## explicit; go 1.22.0 k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/metrics v0.30.2 +# k8s.io/metrics v0.31.1 ## explicit; go 1.22.0 k8s.io/metrics/pkg/apis/metrics k8s.io/metrics/pkg/apis/metrics/v1alpha1 k8s.io/metrics/pkg/apis/metrics/v1beta1 k8s.io/metrics/pkg/client/clientset/versioned/scheme k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1 -# k8s.io/pod-security-admission v0.30.2 +# k8s.io/pod-security-admission v0.31.1 ## explicit; go 1.22.0 k8s.io/pod-security-admission/admission k8s.io/pod-security-admission/admission/api @@ -1714,7 +1758,7 @@ k8s.io/pod-security-admission/admission/api/validation k8s.io/pod-security-admission/api k8s.io/pod-security-admission/metrics k8s.io/pod-security-admission/policy -# k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 +# k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1735,13 +1779,13 @@ mvdan.cc/sh/v3/expand mvdan.cc/sh/v3/fileutil mvdan.cc/sh/v3/pattern mvdan.cc/sh/v3/syntax -# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 -## explicit; go 1.20 +# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 +## explicit; go 1.21 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/controller-runtime v0.18.4 +# sigs.k8s.io/controller-runtime v0.19.0 ## explicit; go 1.22.0 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder @@ -1778,21 +1822,21 @@ sigs.k8s.io/controller-runtime/pkg/manager/signals sigs.k8s.io/controller-runtime/pkg/metrics sigs.k8s.io/controller-runtime/pkg/metrics/server sigs.k8s.io/controller-runtime/pkg/predicate -sigs.k8s.io/controller-runtime/pkg/ratelimiter sigs.k8s.io/controller-runtime/pkg/reconcile sigs.k8s.io/controller-runtime/pkg/recorder sigs.k8s.io/controller-runtime/pkg/scheme sigs.k8s.io/controller-runtime/pkg/source sigs.k8s.io/controller-runtime/pkg/webhook sigs.k8s.io/controller-runtime/pkg/webhook/admission 
+sigs.k8s.io/controller-runtime/pkg/webhook/admission/metrics sigs.k8s.io/controller-runtime/pkg/webhook/conversion sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 -## explicit; go 1.19 +# sigs.k8s.io/kustomize/api v0.17.2 +## explicit; go 1.21 sigs.k8s.io/kustomize/api/filters/annotations sigs.k8s.io/kustomize/api/filters/fieldspec sigs.k8s.io/kustomize/api/filters/filtersutil @@ -1812,12 +1856,14 @@ sigs.k8s.io/kustomize/api/filters/suffix sigs.k8s.io/kustomize/api/filters/valueadd sigs.k8s.io/kustomize/api/hasher sigs.k8s.io/kustomize/api/ifc -sigs.k8s.io/kustomize/api/image sigs.k8s.io/kustomize/api/internal/accumulator sigs.k8s.io/kustomize/api/internal/builtins sigs.k8s.io/kustomize/api/internal/generators sigs.k8s.io/kustomize/api/internal/git +sigs.k8s.io/kustomize/api/internal/image +sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts sigs.k8s.io/kustomize/api/internal/kusterr +sigs.k8s.io/kustomize/api/internal/loader sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers sigs.k8s.io/kustomize/api/internal/plugins/execplugin @@ -1828,17 +1874,15 @@ sigs.k8s.io/kustomize/api/internal/target sigs.k8s.io/kustomize/api/internal/utils sigs.k8s.io/kustomize/api/internal/validate sigs.k8s.io/kustomize/api/konfig -sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts sigs.k8s.io/kustomize/api/krusty sigs.k8s.io/kustomize/api/kv -sigs.k8s.io/kustomize/api/loader sigs.k8s.io/kustomize/api/provenance sigs.k8s.io/kustomize/api/provider sigs.k8s.io/kustomize/api/resmap sigs.k8s.io/kustomize/api/resource sigs.k8s.io/kustomize/api/types -# sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 -## explicit; go 1.19 +# sigs.k8s.io/kustomize/kyaml v0.17.1 +## explicit; go 1.21 sigs.k8s.io/kustomize/kyaml/comments sigs.k8s.io/kustomize/kyaml/errors sigs.k8s.io/kustomize/kyaml/ext @@ -1848,7 +1892,6 @@ sigs.k8s.io/kustomize/kyaml/fn/runtime/container sigs.k8s.io/kustomize/kyaml/fn/runtime/exec sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark -sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util sigs.k8s.io/kustomize/kyaml/kio sigs.k8s.io/kustomize/kyaml/kio/filters @@ -1885,3 +1928,4 @@ sigs.k8s.io/structured-merge-diff/v4/value ## explicit; go 1.12 sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 +sigs.k8s.io/yaml/goyaml.v3 diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go index 6af92b448b..da1e37c18b 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.27.1 -// protoc v3.12.4 +// protoc v3.21.12 // source: konnectivity-client/proto/client/client.proto package client diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go index b8d07fe55a..5a0d6a2a84 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.12.4 +// - protoc v3.21.12 // source: konnectivity-client/proto/client/client.proto package client diff --git a/vendor/sigs.k8s.io/controller-runtime/.gitignore b/vendor/sigs.k8s.io/controller-runtime/.gitignore index 294685952b..2ddc5a8b87 100644 --- a/vendor/sigs.k8s.io/controller-runtime/.gitignore +++ b/vendor/sigs.k8s.io/controller-runtime/.gitignore @@ -23,5 +23,8 @@ # Tools binaries. hack/tools/bin +# Release artifacts +tools/setup-envtest/out + junit-report.xml -/artifacts \ No newline at end of file +/artifacts diff --git a/vendor/sigs.k8s.io/controller-runtime/.gomodcheck.yaml b/vendor/sigs.k8s.io/controller-runtime/.gomodcheck.yaml new file mode 100644 index 0000000000..75c5261fde --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/.gomodcheck.yaml @@ -0,0 +1,14 @@ +upstreamRefs: + - k8s.io/api + - k8s.io/apiextensions-apiserver + - k8s.io/apimachinery + - k8s.io/apiserver + - k8s.io/client-go + - k8s.io/component-base + # k8s.io/klog/v2 -> conflicts with k/k deps + # k8s.io/utils -> conflicts with k/k deps + +excludedModules: + # --- test dependencies: + - github.com/onsi/ginkgo/v2 + - github.com/onsi/gomega diff --git a/vendor/sigs.k8s.io/controller-runtime/Makefile b/vendor/sigs.k8s.io/controller-runtime/Makefile index 438613b3eb..9d92b97730 100644 --- a/vendor/sigs.k8s.io/controller-runtime/Makefile +++ b/vendor/sigs.k8s.io/controller-runtime/Makefile @@ -24,6 +24,11 @@ SHELL:=/usr/bin/env bash .DEFAULT_GOAL:=help +# +# Go. +# +GO_VERSION ?= 1.22.5 + # Use GOPROXY environment variable if set GOPROXY := $(shell go env GOPROXY) ifeq ($(GOPROXY),) @@ -34,6 +39,13 @@ export GOPROXY # Active module mode, as we use go modules to manage dependencies export GO111MODULE=on +# Hosts running SELinux need :z added to volume mounts +SELINUX_ENABLED := $(shell cat /sys/fs/selinux/enforce 2> /dev/null || echo 0) + +ifeq ($(SELINUX_ENABLED),1) + DOCKER_VOL_OPTS?=:z +endif + # Tools. TOOLS_DIR := hack/tools TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/bin) @@ -92,6 +104,13 @@ GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint $(GOLANGCI_LINT): # Build golangci-lint from tools folder. GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GOLANGCI_LINT_PKG) $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER) +GO_MOD_CHECK_DIR := $(abspath ./hack/tools/cmd/gomodcheck) +GO_MOD_CHECK := $(abspath $(TOOLS_BIN_DIR)/gomodcheck) +GO_MOD_CHECK_IGNORE := $(abspath .gomodcheck.yaml) +.PHONY: $(GO_MOD_CHECK) +$(GO_MOD_CHECK): # Build gomodcheck + go build -C $(GO_MOD_CHECK_DIR) -o $(GO_MOD_CHECK) + ## -------------------------------------- ## Linting ## -------------------------------------- @@ -116,6 +135,48 @@ modules: ## Runs go mod to ensure modules are up to date. 
cd $(ENVTEST_DIR); go mod tidy cd $(SCRATCH_ENV_DIR); go mod tidy +## -------------------------------------- +## Release +## -------------------------------------- + +RELEASE_DIR := tools/setup-envtest/out + +.PHONY: $(RELEASE_DIR) +$(RELEASE_DIR): + mkdir -p $(RELEASE_DIR)/ + +.PHONY: release +release: clean-release $(RELEASE_DIR) ## Build release. + @if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi + + # Build binaries first. + $(MAKE) release-binaries + +.PHONY: release-binaries +release-binaries: ## Build release binaries. + RELEASE_BINARY=setup-envtest-linux-amd64 GOOS=linux GOARCH=amd64 $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-linux-arm64 GOOS=linux GOARCH=arm64 $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-linux-ppc64le GOOS=linux GOARCH=ppc64le $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-linux-s390x GOOS=linux GOARCH=s390x $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-darwin-amd64 GOOS=darwin GOARCH=amd64 $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-darwin-arm64 GOOS=darwin GOARCH=arm64 $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-windows-amd64.exe GOOS=windows GOARCH=amd64 $(MAKE) release-binary + +.PHONY: release-binary +release-binary: $(RELEASE_DIR) + docker run \ + --rm \ + -e CGO_ENABLED=0 \ + -e GOOS=$(GOOS) \ + -e GOARCH=$(GOARCH) \ + -e GOCACHE=/tmp/ \ + --user $$(id -u):$$(id -g) \ + -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \ + -w /workspace/tools/setup-envtest \ + golang:$(GO_VERSION) \ + go build -a -trimpath -ldflags "-extldflags '-static'" \ + -o ./out/$(RELEASE_BINARY) ./ + ## -------------------------------------- ## Cleanup / Verification ## -------------------------------------- @@ -129,12 +190,17 @@ clean: ## Cleanup. clean-bin: ## Remove all generated binaries. rm -rf hack/tools/bin +.PHONY: clean-release +clean-release: ## Remove the release folder + rm -rf $(RELEASE_DIR) + .PHONY: verify-modules -verify-modules: modules ## Verify go modules are up to date +verify-modules: modules $(GO_MOD_CHECK) ## Verify go modules are up to date @if !(git diff --quiet HEAD -- go.sum go.mod $(TOOLS_DIR)/go.mod $(TOOLS_DIR)/go.sum $(ENVTEST_DIR)/go.mod $(ENVTEST_DIR)/go.sum $(SCRATCH_ENV_DIR)/go.sum); then \ git diff; \ echo "go module files are out of date, please run 'make modules'"; exit 1; \ fi + $(GO_MOD_CHECK) $(GO_MOD_CHECK_IGNORE) APIDIFF_OLD_COMMIT ?= $(shell git rev-parse origin/main) @@ -142,4 +208,11 @@ APIDIFF_OLD_COMMIT ?= $(shell git rev-parse origin/main) verify-apidiff: $(GO_APIDIFF) ## Check for API differences $(GO_APIDIFF) $(APIDIFF_OLD_COMMIT) --print-compatible +## -------------------------------------- +## Helpers +## -------------------------------------- + +##@ helpers: +go-version: ## Print the go version we use to compile our binaries and images + @echo $(GO_VERSION) diff --git a/vendor/sigs.k8s.io/controller-runtime/README.md b/vendor/sigs.k8s.io/controller-runtime/README.md index e785abdd77..7b4f345044 100644 --- a/vendor/sigs.k8s.io/controller-runtime/README.md +++ b/vendor/sigs.k8s.io/controller-runtime/README.md @@ -40,6 +40,25 @@ Contributors: * [Documentation Changes](/.github/PULL_REQUEST_TEMPLATE/docs.md) * [Test/Build/Other Changes](/.github/PULL_REQUEST_TEMPLATE/other.md) +## Compatibility + +Every minor version of controller-runtime has been tested with a specific minor version of client-go. 
A controller-runtime minor version *may* be compatible with +other client-go minor versions, but this is by chance and neither supported nor tested. In general, we create one minor version of controller-runtime +for each minor version of client-go and other k8s.io/* dependencies. + +The minimum Go version of controller-runtime is the highest minimum Go version of our Go dependencies. Usually, this will +be identical to the minimum Go version of the corresponding k8s.io/* dependencies. + +Compatible k8s.io/*, client-go and minimum Go versions can be looked up in our [go.mod](go.mod) file. + +| | k8s.io/*, client-go | minimum Go version | +|----------|:-------------------:|:------------------:| +| CR v0.19 | v0.31 | 1.22 | +| CR v0.18 | v0.30 | 1.22 | +| CR v0.17 | v0.29 | 1.21 | +| CR v0.16 | v0.28 | 1.20 | +| CR v0.15 | v0.27 | 1.20 | + ## FAQ See [FAQ.md](FAQ.md) @@ -57,6 +76,7 @@ You can reach the maintainers of this project at: - Google Group: [kubebuilder@googlegroups.com](https://groups.google.com/forum/#!forum/kubebuilder) ## Contributing + Contributions are greatly appreciated. The maintainers actively manage the issues list, and try to highlight issues suitable for newcomers. The project follows the typical GitHub pull request model. See [CONTRIBUTING.md](CONTRIBUTING.md) for more details. Before starting any work, please either comment on an existing issue, or file a new one. diff --git a/vendor/sigs.k8s.io/controller-runtime/alias.go b/vendor/sigs.k8s.io/controller-runtime/alias.go index e4f61b1538..3e1ccdcf08 100644 --- a/vendor/sigs.k8s.io/controller-runtime/alias.go +++ b/vendor/sigs.k8s.io/controller-runtime/alias.go @@ -122,8 +122,8 @@ var ( // there is another OwnerReference with Controller flag set. SetControllerReference = controllerutil.SetControllerReference - // SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned - // which is closed on one of these signals. If a second signal is caught, the program + // SetupSignalHandler registers for SIGTERM and SIGINT. A context is returned + // which is canceled on one of these signals. If a second signal is caught, the program // is terminated with exit code 1. SetupSignalHandler = signals.SetupSignalHandler diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go index 2c0063a837..6d906f6e52 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go @@ -19,6 +19,7 @@ package builder import ( "errors" "fmt" + "reflect" "strings" "github.com/go-logr/logr" @@ -36,10 +37,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -// Supporting mocking out functions for testing. -var newController = controller.New -var getGvk = apiutil.GVKForObject - // project represents other forms that we can use to // send/receive a given resource (metadata-only, unstructured, etc). type objectProjection int @@ -52,21 +49,32 @@ const ( ) // Builder builds a Controller. -type Builder struct { +type Builder = TypedBuilder[reconcile.Request] + +// TypedBuilder builds a Controller. The request is the request type +// that is passed to the workqueue and then to the Reconciler. +// The workqueue de-duplicates identical requests. 
+type TypedBuilder[request comparable] struct { forInput ForInput ownsInput []OwnsInput - rawSources []source.Source - watchesInput []WatchesInput + rawSources []source.TypedSource[request] + watchesInput []WatchesInput[request] mgr manager.Manager globalPredicates []predicate.Predicate - ctrl controller.Controller - ctrlOptions controller.Options + ctrl controller.TypedController[request] + ctrlOptions controller.TypedOptions[request] name string + newController func(name string, mgr manager.Manager, options controller.TypedOptions[request]) (controller.TypedController[request], error) } // ControllerManagedBy returns a new controller builder that will be started by the provided Manager. func ControllerManagedBy(m manager.Manager) *Builder { - return &Builder{mgr: m} + return TypedControllerManagedBy[reconcile.Request](m) +} + +// TypedControllerManagedBy returns a new typed controller builder that will be started by the provided Manager. +func TypedControllerManagedBy[request comparable](m manager.Manager) *TypedBuilder[request] { + return &TypedBuilder[request]{mgr: m} } // ForInput represents the information set by the For method. @@ -79,9 +87,10 @@ type ForInput struct { // For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / // update events by *reconciling the object*. +// // This is the equivalent of calling -// Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}). -func (blder *Builder) For(object client.Object, opts ...ForOption) *Builder { +// Watches(source.Kind(cache, &Type{}, &handler.EnqueueRequestForObject{})). +func (blder *TypedBuilder[request]) For(object client.Object, opts ...ForOption) *TypedBuilder[request] { if blder.forInput.object != nil { blder.forInput.err = fmt.Errorf("For(...) should only be called once, could not assign multiple objects for reconciliation") return blder @@ -110,8 +119,8 @@ type OwnsInput struct { // Use Owns(object, builder.MatchEveryOwner) to reconcile all owners. // // By default, this is the equivalent of calling -// Watches(object, handler.EnqueueRequestForOwner([...], ownerType, OnlyControllerOwner())). -func (blder *Builder) Owns(object client.Object, opts ...OwnsOption) *Builder { +// Watches(source.Kind(cache, &Type{}, handler.EnqueueRequestForOwner([...], &OwnerType{}, OnlyControllerOwner()))). +func (blder *TypedBuilder[request]) Owns(object client.Object, opts ...OwnsOption) *TypedBuilder[request] { input := OwnsInput{object: object} for _, opt := range opts { opt.ApplyToOwns(&input) @@ -121,21 +130,38 @@ func (blder *Builder) Owns(object client.Object, opts ...OwnsOption) *Builder { return blder } +type untypedWatchesInput interface { + setPredicates([]predicate.Predicate) + setObjectProjection(objectProjection) +} + // WatchesInput represents the information set by Watches method. 
-type WatchesInput struct { +type WatchesInput[request comparable] struct { obj client.Object - handler handler.EventHandler + handler handler.TypedEventHandler[client.Object, request] predicates []predicate.Predicate objectProjection objectProjection } +func (w *WatchesInput[request]) setPredicates(predicates []predicate.Predicate) { + w.predicates = predicates +} + +func (w *WatchesInput[request]) setObjectProjection(objectProjection objectProjection) { + w.objectProjection = objectProjection +} + // Watches defines the type of Object to watch, and configures the ControllerManagedBy to respond to create / delete / // update events by *reconciling the object* with the given EventHandler. // // This is the equivalent of calling // WatchesRawSource(source.Kind(cache, object, eventHandler, predicates...)). -func (blder *Builder) Watches(object client.Object, eventHandler handler.EventHandler, opts ...WatchesOption) *Builder { - input := WatchesInput{ +func (blder *TypedBuilder[request]) Watches( + object client.Object, + eventHandler handler.TypedEventHandler[client.Object, request], + opts ...WatchesOption, +) *TypedBuilder[request] { + input := WatchesInput[request]{ obj: object, handler: eventHandler, } @@ -175,19 +201,21 @@ func (blder *Builder) Watches(object client.Object, eventHandler handler.EventHa // In the first case, controller-runtime will create another cache for the // concrete type on top of the metadata cache; this increases memory // consumption and leads to race conditions as caches are not in sync. -func (blder *Builder) WatchesMetadata(object client.Object, eventHandler handler.EventHandler, opts ...WatchesOption) *Builder { +func (blder *TypedBuilder[request]) WatchesMetadata( + object client.Object, + eventHandler handler.TypedEventHandler[client.Object, request], + opts ...WatchesOption, +) *TypedBuilder[request] { opts = append(opts, OnlyMetadata) return blder.Watches(object, eventHandler, opts...) } // WatchesRawSource exposes the lower-level ControllerManagedBy Watches functions through the builder. -// Specified predicates are registered only for given source. -// -// STOP! Consider using For(...), Owns(...), Watches(...), WatchesMetadata(...) instead. -// This method is only exposed for more advanced use cases, most users should use one of the higher level functions. // // WatchesRawSource does not respect predicates configured through WithEventFilter. -func (blder *Builder) WatchesRawSource(src source.Source) *Builder { +// +// WatchesRawSource makes it possible to use typed handlers and predicates with `source.Kind` as well as custom source implementations. +func (blder *TypedBuilder[request]) WatchesRawSource(src source.TypedSource[request]) *TypedBuilder[request] { blder.rawSources = append(blder.rawSources, src) return blder @@ -195,21 +223,23 @@ func (blder *Builder) WatchesRawSource(src source.Source) *Builder { // WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually // trigger reconciliations. For example, filtering on whether the resource version has changed. -// Given predicate is added for all watched objects. +// Given predicate is added for all watched objects and thus must be able to deal with the type +// of all watched objects. +// // Defaults to the empty list. 
-func (blder *Builder) WithEventFilter(p predicate.Predicate) *Builder { +func (blder *TypedBuilder[request]) WithEventFilter(p predicate.Predicate) *TypedBuilder[request] { blder.globalPredicates = append(blder.globalPredicates, p) return blder } // WithOptions overrides the controller options used in doController. Defaults to empty. -func (blder *Builder) WithOptions(options controller.Options) *Builder { +func (blder *TypedBuilder[request]) WithOptions(options controller.TypedOptions[request]) *TypedBuilder[request] { blder.ctrlOptions = options return blder } // WithLogConstructor overrides the controller options's LogConstructor. -func (blder *Builder) WithLogConstructor(logConstructor func(*reconcile.Request) logr.Logger) *Builder { +func (blder *TypedBuilder[request]) WithLogConstructor(logConstructor func(*request) logr.Logger) *TypedBuilder[request] { blder.ctrlOptions.LogConstructor = logConstructor return blder } @@ -219,19 +249,21 @@ func (blder *Builder) WithLogConstructor(logConstructor func(*reconcile.Request) // (underscores and alphanumeric characters only). // // By default, controllers are named using the lowercase version of their kind. -func (blder *Builder) Named(name string) *Builder { +// +// The name must be unique as it is used to identify the controller in metrics and logs. +func (blder *TypedBuilder[request]) Named(name string) *TypedBuilder[request] { blder.name = name return blder } // Complete builds the Application Controller. -func (blder *Builder) Complete(r reconcile.Reconciler) error { +func (blder *TypedBuilder[request]) Complete(r reconcile.TypedReconciler[request]) error { _, err := blder.Build(r) return err } // Build builds the Application Controller and returns the Controller it created. -func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, error) { +func (blder *TypedBuilder[request]) Build(r reconcile.TypedReconciler[request]) (controller.TypedController[request], error) { if r == nil { return nil, fmt.Errorf("must provide a non-nil Reconciler") } @@ -255,13 +287,13 @@ func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, erro return blder.ctrl, nil } -func (blder *Builder) project(obj client.Object, proj objectProjection) (client.Object, error) { +func (blder *TypedBuilder[request]) project(obj client.Object, proj objectProjection) (client.Object, error) { switch proj { case projectAsNormal: return obj, nil case projectAsMetadata: metaObj := &metav1.PartialObjectMetadata{} - gvk, err := getGvk(obj, blder.mgr.GetScheme()) + gvk, err := apiutil.GVKForObject(obj, blder.mgr.GetScheme()) if err != nil { return nil, fmt.Errorf("unable to determine GVK of %T for a metadata-only watch: %w", obj, err) } @@ -272,17 +304,23 @@ func (blder *Builder) project(obj client.Object, proj objectProjection) (client. } } -func (blder *Builder) doWatch() error { +func (blder *TypedBuilder[request]) doWatch() error { // Reconcile type if blder.forInput.object != nil { obj, err := blder.project(blder.forInput.object, blder.forInput.objectProjection) if err != nil { return err } - hdler := &handler.EnqueueRequestForObject{} + + if reflect.TypeFor[request]() != reflect.TypeOf(reconcile.Request{}) { + return fmt.Errorf("For() can only be used with reconcile.Request, got %T", *new(request)) + } + + var hdler handler.TypedEventHandler[client.Object, request] + reflect.ValueOf(&hdler).Elem().Set(reflect.ValueOf(&handler.EnqueueRequestForObject{})) allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) 
allPredicates = append(allPredicates, blder.forInput.predicates...) - src := source.Kind(blder.mgr.GetCache(), obj, hdler, allPredicates...) + src := source.TypedKind(blder.mgr.GetCache(), obj, hdler, allPredicates...) if err := blder.ctrl.Watch(src); err != nil { return err } @@ -301,14 +339,16 @@ func (blder *Builder) doWatch() error { if !own.matchEveryOwner { opts = append(opts, handler.OnlyControllerOwner()) } - hdler := handler.EnqueueRequestForOwner( + + var hdler handler.TypedEventHandler[client.Object, request] + reflect.ValueOf(&hdler).Elem().Set(reflect.ValueOf(handler.EnqueueRequestForOwner( blder.mgr.GetScheme(), blder.mgr.GetRESTMapper(), blder.forInput.object, opts..., - ) + ))) allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) allPredicates = append(allPredicates, own.predicates...) - src := source.Kind(blder.mgr.GetCache(), obj, hdler, allPredicates...) + src := source.TypedKind(blder.mgr.GetCache(), obj, hdler, allPredicates...) if err := blder.ctrl.Watch(src); err != nil { return err } @@ -325,7 +365,7 @@ func (blder *Builder) doWatch() error { } allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) allPredicates = append(allPredicates, w.predicates...) - if err := blder.ctrl.Watch(source.Kind(blder.mgr.GetCache(), projected, w.handler, allPredicates...)); err != nil { + if err := blder.ctrl.Watch(source.TypedKind(blder.mgr.GetCache(), projected, w.handler, allPredicates...)); err != nil { return err } } @@ -337,7 +377,7 @@ func (blder *Builder) doWatch() error { return nil } -func (blder *Builder) getControllerName(gvk schema.GroupVersionKind, hasGVK bool) (string, error) { +func (blder *TypedBuilder[request]) getControllerName(gvk schema.GroupVersionKind, hasGVK bool) (string, error) { if blder.name != "" { return blder.name, nil } @@ -347,7 +387,7 @@ func (blder *Builder) getControllerName(gvk schema.GroupVersionKind, hasGVK bool return strings.ToLower(gvk.Kind), nil } -func (blder *Builder) doController(r reconcile.Reconciler) error { +func (blder *TypedBuilder[request]) doController(r reconcile.TypedReconciler[request]) error { globalOpts := blder.mgr.GetControllerOptions() ctrlOptions := blder.ctrlOptions @@ -364,7 +404,7 @@ func (blder *Builder) doController(r reconcile.Reconciler) error { hasGVK := blder.forInput.object != nil if hasGVK { var err error - gvk, err = getGvk(blder.forInput.object, blder.mgr.GetScheme()) + gvk, err = apiutil.GVKForObject(blder.forInput.object, blder.mgr.GetScheme()) if err != nil { return err } @@ -401,9 +441,10 @@ func (blder *Builder) doController(r reconcile.Reconciler) error { ) } - ctrlOptions.LogConstructor = func(req *reconcile.Request) logr.Logger { + ctrlOptions.LogConstructor = func(in *request) logr.Logger { log := log - if req != nil { + + if req, ok := any(in).(*reconcile.Request); ok && req != nil { if hasGVK { log = log.WithValues(gvk.Kind, klog.KRef(req.Namespace, req.Name)) } @@ -415,7 +456,11 @@ func (blder *Builder) doController(r reconcile.Reconciler) error { } } + if blder.newController == nil { + blder.newController = controller.NewTyped[request] + } + // Build the controller and return. 
- blder.ctrl, err = newController(controllerName, blder.mgr, ctrlOptions) + blder.ctrl, err = blder.newController(controllerName, blder.mgr, ctrlOptions) return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go index 15f66b2a82..b907b5d020 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go @@ -37,7 +37,7 @@ type OwnsOption interface { // WatchesOption is some configuration that modifies options for a watches request. type WatchesOption interface { // ApplyToWatches applies this configuration to the given watches options. - ApplyToWatches(*WatchesInput) + ApplyToWatches(untypedWatchesInput) } // }}} @@ -67,8 +67,8 @@ func (w Predicates) ApplyToOwns(opts *OwnsInput) { } // ApplyToWatches applies this configuration to the given WatchesInput options. -func (w Predicates) ApplyToWatches(opts *WatchesInput) { - opts.predicates = w.predicates +func (w Predicates) ApplyToWatches(opts untypedWatchesInput) { + opts.setPredicates(w.predicates) } var _ ForOption = &Predicates{} @@ -95,8 +95,8 @@ func (p projectAs) ApplyToOwns(opts *OwnsInput) { } // ApplyToWatches applies this configuration to the given WatchesInput options. -func (p projectAs) ApplyToWatches(opts *WatchesInput) { - opts.objectProjection = objectProjection(p) +func (p projectAs) ApplyToWatches(opts untypedWatchesInput) { + opts.setObjectProjection(objectProjection(p)) } var ( diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go index 6170180c74..81d8f74056 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go @@ -42,7 +42,7 @@ type WebhookBuilder struct { gvk schema.GroupVersionKind mgr manager.Manager config *rest.Config - recoverPanic bool + recoverPanic *bool logConstructor func(base logr.Logger, req *admission.Request) logr.Logger err error } @@ -84,8 +84,9 @@ func (blder *WebhookBuilder) WithLogConstructor(logConstructor func(base logr.Lo } // RecoverPanic indicates whether panics caused by the webhook should be recovered. -func (blder *WebhookBuilder) RecoverPanic() *WebhookBuilder { - blder.recoverPanic = true +// Defaults to true. 
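
Usage sketch for the typed builder introduced above (not part of the vendored patch): the string request type, the ConfigMap watch, and every name below are illustrative assumptions, and ControllerManagedBy keeps its previous behaviour for reconcile.Request-based controllers. Because For() is restricted to reconcile.Request, a custom request type is wired through WatchesRawSource with a typed handler.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// keyReconciler consumes plain "namespace/name" strings instead of reconcile.Request.
type keyReconciler struct{}

func (keyReconciler) Reconcile(ctx context.Context, key string) (reconcile.Result, error) {
	// key is whatever the handler below enqueued.
	return reconcile.Result{}, nil
}

// setupTyped wires a controller whose work-queue items are strings. Named is required
// because there is no For() object to derive a controller name from.
func setupTyped(mgr manager.Manager) error {
	return builder.TypedControllerManagedBy[string](mgr).
		Named("configmap-keys").
		WatchesRawSource(source.TypedKind(
			mgr.GetCache(),
			&corev1.ConfigMap{},
			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, cm *corev1.ConfigMap) []string {
				return []string{cm.GetNamespace() + "/" + cm.GetName()}
			}),
		)).
		Complete(keyReconciler{})
}
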
+func (blder *WebhookBuilder) RecoverPanic(recoverPanic bool) *WebhookBuilder { + blder.recoverPanic = &recoverPanic return blder } @@ -169,10 +170,18 @@ func (blder *WebhookBuilder) registerDefaultingWebhook() { func (blder *WebhookBuilder) getDefaultingWebhook() *admission.Webhook { if defaulter := blder.customDefaulter; defaulter != nil { - return admission.WithCustomDefaulter(blder.mgr.GetScheme(), blder.apiType, defaulter).WithRecoverPanic(blder.recoverPanic) + w := admission.WithCustomDefaulter(blder.mgr.GetScheme(), blder.apiType, defaulter) + if blder.recoverPanic != nil { + w = w.WithRecoverPanic(*blder.recoverPanic) + } + return w } if defaulter, ok := blder.apiType.(admission.Defaulter); ok { - return admission.DefaultingWebhookFor(blder.mgr.GetScheme(), defaulter).WithRecoverPanic(blder.recoverPanic) + w := admission.DefaultingWebhookFor(blder.mgr.GetScheme(), defaulter) + if blder.recoverPanic != nil { + w = w.WithRecoverPanic(*blder.recoverPanic) + } + return w } log.Info( "skip registering a mutating webhook, object does not implement admission.Defaulter or WithDefaulter wasn't called", @@ -200,10 +209,18 @@ func (blder *WebhookBuilder) registerValidatingWebhook() { func (blder *WebhookBuilder) getValidatingWebhook() *admission.Webhook { if validator := blder.customValidator; validator != nil { - return admission.WithCustomValidator(blder.mgr.GetScheme(), blder.apiType, validator).WithRecoverPanic(blder.recoverPanic) + w := admission.WithCustomValidator(blder.mgr.GetScheme(), blder.apiType, validator) + if blder.recoverPanic != nil { + w = w.WithRecoverPanic(*blder.recoverPanic) + } + return w } if validator, ok := blder.apiType.(admission.Validator); ok { - return admission.ValidatingWebhookFor(blder.mgr.GetScheme(), validator).WithRecoverPanic(blder.recoverPanic) + w := admission.ValidatingWebhookFor(blder.mgr.GetScheme(), validator) + if blder.recoverPanic != nil { + w = w.WithRecoverPanic(*blder.recoverPanic) + } + return w } log.Info( "skip registering a validating webhook, object does not implement admission.Validator or WithValidator wasn't called", diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index 612dcca8b3..706f9c6cdd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -117,8 +117,8 @@ type Informer interface { // This function is guaranteed to be idempotent and thread-safe. RemoveEventHandler(handle toolscache.ResourceEventHandlerRegistration) error - // AddIndexers adds indexers to this store. If this is called after there is already data - // in the store, the results are undefined. + // AddIndexers adds indexers to this store. It is valid to add indexers + // after an informer was started. AddIndexers(indexers toolscache.Indexers) error // HasSynced return true if the informers underlying store has synced. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go index 2b9b60d8d7..fe15fc0dd7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go @@ -173,14 +173,14 @@ func (cw *CertWatcher) ReadCertificate() error { func (cw *CertWatcher) handleEvent(event fsnotify.Event) { // Only care about events which may modify the contents of the file. 
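
Usage sketch for the RecoverPanic signature change above: the builder method now takes an explicit bool, and leaving it uncalled falls back to the admission package default of recovering panics. The podDefaulter type is a hypothetical CustomDefaulter, not something from the patch.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// podDefaulter is a hypothetical admission.CustomDefaulter.
type podDefaulter struct{}

func (podDefaulter) Default(ctx context.Context, obj runtime.Object) error {
	// Mutate obj here; a no-op for the sketch.
	return nil
}

func setupWebhook(mgr manager.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&corev1.Pod{}).
		WithDefaulter(podDefaulter{}).
		// The argument is now required; omit the call entirely to keep the default.
		RecoverPanic(true).
		Complete()
}
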
- if !(isWrite(event) || isRemove(event) || isCreate(event)) { + if !(isWrite(event) || isRemove(event) || isCreate(event) || isChmod(event)) { return } log.V(1).Info("certificate event", "event", event) - // If the file was removed, re-add the watch. - if isRemove(event) { + // If the file was removed or renamed, re-add the watch to the previous name + if isRemove(event) || isChmod(event) { if err := cw.watcher.Add(event.Name); err != nil { log.Error(err, "error re-watching file") } @@ -202,3 +202,7 @@ func isCreate(event fsnotify.Event) bool { func isRemove(event fsnotify.Event) bool { return event.Op.Has(fsnotify.Remove) } + +func isChmod(event fsnotify.Event) bool { + return event.Op.Has(fsnotify.Chmod) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index 3c0206bea5..1d4ce264c9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -72,7 +72,10 @@ func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper m // IsGVKNamespaced returns true if the object having the provided // GVK is namespace scoped. func IsGVKNamespaced(gvk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) { - restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}) + // Fetch the RESTMapping using the complete GVK. If we exclude the Version, the Version set + // will be populated using the cached Group if available. This can lead to failures updating + // the cache with new Versions of CRDs registered at runtime. + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) if err != nil { return false, fmt.Errorf("failed to get restmapping: %w", err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index e6c075eb00..fe9862b814 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -50,28 +50,10 @@ type Options struct { // Cache, if provided, is used to read objects from the cache. Cache *CacheOptions - // WarningHandler is used to configure the warning handler responsible for - // surfacing and handling warnings messages sent by the API server. - WarningHandler WarningHandlerOptions - // DryRun instructs the client to only perform dry run requests. DryRun *bool } -// WarningHandlerOptions are options for configuring a -// warning handler for the client which is responsible -// for surfacing API Server warnings. -type WarningHandlerOptions struct { - // SuppressWarnings decides if the warnings from the - // API server are suppressed or surfaced in the client. - SuppressWarnings bool - // AllowDuplicateLogs does not deduplicate the to-be - // logged surfaced warnings messages. See - // log.WarningHandlerOptions for considerations - // regarding deduplication - AllowDuplicateLogs bool -} - // CacheOptions are options for creating a cache-backed client. type CacheOptions struct { // Reader is a cache-backed reader that will be used to read objects from the cache. @@ -91,6 +73,12 @@ type NewClientFunc func(config *rest.Config, options Options) (Client, error) // New returns a new Client using the provided config and Options. // +// By default, the client surfaces warnings returned by the server. 
To +// suppress warnings, set config.WarningHandler = rest.NoWarnings{}. To +// define custom behavior, implement the rest.WarningHandler interface. +// See [sigs.k8s.io/controller-runtime/pkg/log.KubeAPIWarningLogger] for +// an example. +// // The client's read behavior is determined by Options.Cache. // If either Options.Cache or Options.Cache.Reader is nil, // the client reads directly from the API server. @@ -124,17 +112,12 @@ func newClient(config *rest.Config, options Options) (*client, error) { config.UserAgent = rest.DefaultKubernetesUserAgent() } - if !options.WarningHandler.SuppressWarnings { - // surface warnings - logger := log.Log.WithName("KubeAPIWarningLogger") - // Set a WarningHandler, the default WarningHandler - // is log.KubeAPIWarningLogger with deduplication enabled. - // See log.KubeAPIWarningLoggerOptions for considerations - // regarding deduplication. + if config.WarningHandler == nil { + // By default, we de-duplicate and surface warnings. config.WarningHandler = log.NewKubeAPIWarningLogger( - logger, + log.Log.WithName("KubeAPIWarningLogger"), log.KubeAPIWarningLoggerOptions{ - Deduplicate: !options.WarningHandler.AllowDuplicateLogs, + Deduplicate: true, }, ) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go index 2e982e3a55..7366a18528 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go @@ -30,7 +30,9 @@ import ( "time" // Using v4 to match upstream - jsonpatch "github.com/evanphx/json-patch" + jsonpatch "gopkg.in/evanphx/json-patch.v4" + appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -50,6 +52,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/testing" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -83,6 +86,8 @@ const ( maxNameLength = 63 randomLength = 5 maxGeneratedNameLength = maxNameLength - randomLength + + subResourceScale = "scale" ) // NewFakeClient creates a new fake client for testing. 
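
Usage sketch for the removed WarningHandlerOptions above: warning suppression now goes through the rest.Config rather than client.Options. A minimal sketch, assuming the usual ctrl.GetConfig() entry point; by default the client still logs deduplicated warnings via KubeAPIWarningLogger.

package example

import (
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newQuietClient builds a client with API server warnings suppressed. The former
// Options.WarningHandler knob is gone; the rest.Config carries the handler instead.
func newQuietClient() (client.Client, error) {
	cfg, err := ctrl.GetConfig()
	if err != nil {
		return nil, err
	}
	cfg.WarningHandler = rest.NoWarnings{}
	return client.New(cfg, client.Options{})
}
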
@@ -301,7 +306,7 @@ func (t versionedTracker) Add(obj runtime.Object) error { return nil } -func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { +func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error { accessor, err := meta.Accessor(obj) if err != nil { return fmt.Errorf("failed to get accessor for object: %w", err) @@ -320,7 +325,7 @@ func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Ob if err != nil { return err } - if err := t.ObjectTracker.Create(gvr, obj, ns); err != nil { + if err := t.ObjectTracker.Create(gvr, obj, ns, opts...); err != nil { accessor.SetResourceVersion("") return err } @@ -359,24 +364,59 @@ func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (ru return typed, nil } -func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { +func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error { + updateOpts, err := getSingleOrZeroOptions(opts) + if err != nil { + return err + } + + return t.update(gvr, obj, ns, false, false, updateOpts) +} + +func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, isStatus, deleting bool, opts metav1.UpdateOptions) error { + obj, err := t.updateObject(gvr, obj, ns, isStatus, deleting, opts.DryRun) + if err != nil { + return err + } + if obj == nil { + return nil + } + + return t.ObjectTracker.Update(gvr, obj, ns, opts) +} + +func (t versionedTracker) Patch(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.PatchOptions) error { + patchOptions, err := getSingleOrZeroOptions(opts) + if err != nil { + return err + } + isStatus := false - // We apply patches using a client-go reaction that ends up calling the trackers Update. As we can't change + // We apply patches using a client-go reaction that ends up calling the trackers Patch. As we can't change // that reaction, we use the callstack to figure out if this originated from the status client. 
if bytes.Contains(debug.Stack(), []byte("sigs.k8s.io/controller-runtime/pkg/client/fake.(*fakeSubResourceClient).statusPatch")) { isStatus = true } - return t.update(gvr, obj, ns, isStatus, false) + + obj, err = t.updateObject(gvr, obj, ns, isStatus, false, patchOptions.DryRun) + if err != nil { + return err + } + if obj == nil { + return nil + } + + return t.ObjectTracker.Patch(gvr, obj, ns, patchOptions) } -func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, isStatus bool, deleting bool) error { +func (t versionedTracker) updateObject(gvr schema.GroupVersionResource, obj runtime.Object, ns string, isStatus, deleting bool, dryRun []string) (runtime.Object, error) { accessor, err := meta.Accessor(obj) if err != nil { - return fmt.Errorf("failed to get accessor for object: %w", err) + return nil, fmt.Errorf("failed to get accessor for object: %w", err) } if accessor.GetName() == "" { - return apierrors.NewInvalid( + return nil, apierrors.NewInvalid( obj.GetObjectKind().GroupVersionKind().GroupKind(), accessor.GetName(), field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) @@ -384,7 +424,7 @@ func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Ob gvk, err := apiutil.GVKForObject(obj, t.scheme) if err != nil { - return err + return nil, err } oldObject, err := t.ObjectTracker.Get(gvr, ns, accessor.GetName()) @@ -392,33 +432,33 @@ func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Ob // If the resource is not found and the resource allows create on update, issue a // create instead. if apierrors.IsNotFound(err) && allowsCreateOnUpdate(gvk) { - return t.Create(gvr, obj, ns) + return nil, t.Create(gvr, obj, ns) } - return err + return nil, err } if t.withStatusSubresource.Has(gvk) { if isStatus { // copy everything but status and metadata.ResourceVersion from original object if err := copyStatusFrom(obj, oldObject); err != nil { - return fmt.Errorf("failed to copy non-status field for object with status subresouce: %w", err) + return nil, fmt.Errorf("failed to copy non-status field for object with status subresouce: %w", err) } passedRV := accessor.GetResourceVersion() if err := copyFrom(oldObject, obj); err != nil { - return fmt.Errorf("failed to restore non-status fields: %w", err) + return nil, fmt.Errorf("failed to restore non-status fields: %w", err) } accessor.SetResourceVersion(passedRV) } else { // copy status from original object if err := copyStatusFrom(oldObject, obj); err != nil { - return fmt.Errorf("failed to copy the status for object with status subresource: %w", err) + return nil, fmt.Errorf("failed to copy the status for object with status subresource: %w", err) } } } else if isStatus { - return apierrors.NewNotFound(gvr.GroupResource(), accessor.GetName()) + return nil, apierrors.NewNotFound(gvr.GroupResource(), accessor.GetName()) } oldAccessor, err := meta.Accessor(oldObject) if err != nil { - return err + return nil, err } // If the new object does not have the resource version set and it allows unconditional update, @@ -436,30 +476,26 @@ func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Ob } if accessor.GetResourceVersion() != oldAccessor.GetResourceVersion() { - return apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified")) + return nil, apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified")) } if oldAccessor.GetResourceVersion() == 
"" { oldAccessor.SetResourceVersion("0") } intResourceVersion, err := strconv.ParseUint(oldAccessor.GetResourceVersion(), 10, 64) if err != nil { - return fmt.Errorf("can not convert resourceVersion %q to int: %w", oldAccessor.GetResourceVersion(), err) + return nil, fmt.Errorf("can not convert resourceVersion %q to int: %w", oldAccessor.GetResourceVersion(), err) } intResourceVersion++ accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10)) if !deleting && !deletionTimestampEqual(accessor, oldAccessor) { - return fmt.Errorf("error: Unable to edit %s: metadata.deletionTimestamp field is immutable", accessor.GetName()) + return nil, fmt.Errorf("error: Unable to edit %s: metadata.deletionTimestamp field is immutable", accessor.GetName()) } if !accessor.GetDeletionTimestamp().IsZero() && len(accessor.GetFinalizers()) == 0 { - return t.ObjectTracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) - } - obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj) - if err != nil { - return err + return nil, t.ObjectTracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName(), metav1.DeleteOptions{DryRun: dryRun}) } - return t.ObjectTracker.Update(gvr, obj, ns) + return convertFromUnstructuredIfNecessary(t.scheme, obj) } func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { @@ -803,7 +839,7 @@ func (c *fakeClient) update(obj client.Object, isStatus bool, opts ...client.Upd if err != nil { return err } - return c.tracker.update(gvr, obj, accessor.GetNamespace(), isStatus, false) + return c.tracker.update(gvr, obj, accessor.GetNamespace(), isStatus, false, *updateOptions.AsUpdateOptions()) } func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { @@ -1056,7 +1092,7 @@ func (c *fakeClient) deleteObject(gvr schema.GroupVersionResource, accessor meta oldAccessor.SetDeletionTimestamp(&now) // Call update directly with mutability parameter set to true to allow // changes to deletionTimestamp - return c.tracker.update(gvr, old, accessor.GetNamespace(), false, true) + return c.tracker.update(gvr, old, accessor.GetNamespace(), false, true, metav1.UpdateOptions{}) } } } @@ -1080,7 +1116,26 @@ type fakeSubResourceClient struct { } func (sw *fakeSubResourceClient) Get(ctx context.Context, obj, subResource client.Object, opts ...client.SubResourceGetOption) error { - panic("fakeSubResourceClient does not support get") + switch sw.subResource { + case subResourceScale: + // Actual client looks up resource, then extracts the scale sub-resource: + // https://github.com/kubernetes/kubernetes/blob/fb6bbc9781d11a87688c398778525c4e1dcb0f08/pkg/registry/apps/deployment/storage/storage.go#L307 + if err := sw.client.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil { + return err + } + scale, isScale := subResource.(*autoscalingv1.Scale) + if !isScale { + return apierrors.NewBadRequest(fmt.Sprintf("expected Scale, got %t", subResource)) + } + scaleOut, err := extractScale(obj) + if err != nil { + return err + } + *scale = *scaleOut + return nil + default: + return fmt.Errorf("fakeSubResourceClient does not support get for %s", sw.subResource) + } } func (sw *fakeSubResourceClient) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { @@ -1107,11 +1162,30 @@ func (sw *fakeSubResourceClient) Update(ctx context.Context, obj client.Object, updateOptions := 
client.SubResourceUpdateOptions{} updateOptions.ApplyOptions(opts) - body := obj - if updateOptions.SubResourceBody != nil { - body = updateOptions.SubResourceBody + switch sw.subResource { + case subResourceScale: + if err := sw.client.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil { + return err + } + if updateOptions.SubResourceBody == nil { + return apierrors.NewBadRequest("missing SubResourceBody") + } + + scale, isScale := updateOptions.SubResourceBody.(*autoscalingv1.Scale) + if !isScale { + return apierrors.NewBadRequest(fmt.Sprintf("expected Scale, got %t", updateOptions.SubResourceBody)) + } + if err := applyScale(obj, scale); err != nil { + return err + } + return sw.client.update(obj, false, &updateOptions.UpdateOptions) + default: + body := obj + if updateOptions.SubResourceBody != nil { + body = updateOptions.SubResourceBody + } + return sw.client.update(body, true, &updateOptions.UpdateOptions) } - return sw.client.update(body, true, &updateOptions.UpdateOptions) } func (sw *fakeSubResourceClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { @@ -1278,3 +1352,138 @@ func zero(x interface{}) { res := reflect.ValueOf(x).Elem() res.Set(reflect.Zero(res.Type())) } + +// getSingleOrZeroOptions returns the single options value in the slice, its +// zero value if the slice is empty, or an error if the slice contains more than +// one option value. +func getSingleOrZeroOptions[T any](opts []T) (opt T, err error) { + switch len(opts) { + case 0: + case 1: + opt = opts[0] + default: + err = fmt.Errorf("expected single or no options value, got %d values", len(opts)) + } + return +} + +func extractScale(obj client.Object) (*autoscalingv1.Scale, error) { + switch obj := obj.(type) { + case *appsv1.Deployment: + var replicas int32 = 1 + if obj.Spec.Replicas != nil { + replicas = *obj.Spec.Replicas + } + var selector string + if obj.Spec.Selector != nil { + selector = obj.Spec.Selector.String() + } + return &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: obj.Namespace, + Name: obj.Name, + UID: obj.UID, + ResourceVersion: obj.ResourceVersion, + CreationTimestamp: obj.CreationTimestamp, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: replicas, + }, + Status: autoscalingv1.ScaleStatus{ + Replicas: obj.Status.Replicas, + Selector: selector, + }, + }, nil + case *appsv1.ReplicaSet: + var replicas int32 = 1 + if obj.Spec.Replicas != nil { + replicas = *obj.Spec.Replicas + } + var selector string + if obj.Spec.Selector != nil { + selector = obj.Spec.Selector.String() + } + return &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: obj.Namespace, + Name: obj.Name, + UID: obj.UID, + ResourceVersion: obj.ResourceVersion, + CreationTimestamp: obj.CreationTimestamp, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: replicas, + }, + Status: autoscalingv1.ScaleStatus{ + Replicas: obj.Status.Replicas, + Selector: selector, + }, + }, nil + case *corev1.ReplicationController: + var replicas int32 = 1 + if obj.Spec.Replicas != nil { + replicas = *obj.Spec.Replicas + } + return &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: obj.Namespace, + Name: obj.Name, + UID: obj.UID, + ResourceVersion: obj.ResourceVersion, + CreationTimestamp: obj.CreationTimestamp, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: replicas, + }, + Status: autoscalingv1.ScaleStatus{ + Replicas: obj.Status.Replicas, + Selector: labels.Set(obj.Spec.Selector).String(), + }, + }, nil + case 
*appsv1.StatefulSet: + var replicas int32 = 1 + if obj.Spec.Replicas != nil { + replicas = *obj.Spec.Replicas + } + var selector string + if obj.Spec.Selector != nil { + selector = obj.Spec.Selector.String() + } + return &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: obj.Namespace, + Name: obj.Name, + UID: obj.UID, + ResourceVersion: obj.ResourceVersion, + CreationTimestamp: obj.CreationTimestamp, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: replicas, + }, + Status: autoscalingv1.ScaleStatus{ + Replicas: obj.Status.Replicas, + Selector: selector, + }, + }, nil + default: + // TODO: CRDs https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#scale-subresource + return nil, fmt.Errorf("unimplemented scale subresource for resource %T", obj) + } +} + +func applyScale(obj client.Object, scale *autoscalingv1.Scale) error { + switch obj := obj.(type) { + case *appsv1.Deployment: + obj.Spec.Replicas = ptr.To(scale.Spec.Replicas) + case *appsv1.ReplicaSet: + obj.Spec.Replicas = ptr.To(scale.Spec.Replicas) + case *corev1.ReplicationController: + obj.Spec.Replicas = ptr.To(scale.Spec.Replicas) + case *appsv1.StatefulSet: + obj.Spec.Replicas = ptr.To(scale.Spec.Replicas) + default: + // TODO: CRDs https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#scale-subresource + return fmt.Errorf("unimplemented scale subresource for resource %T", obj) + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go new file mode 100644 index 0000000000..659b3d44c9 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go @@ -0,0 +1,106 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// WithFieldValidation wraps a Client and configures field validation, by +// default, for all write requests from this client. Users can override field +// validation for individual write requests. +func WithFieldValidation(c Client, validation FieldValidation) Client { + return &clientWithFieldValidation{ + validation: validation, + client: c, + Reader: c, + } +} + +type clientWithFieldValidation struct { + validation FieldValidation + client Client + Reader +} + +func (c *clientWithFieldValidation) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return c.client.Create(ctx, obj, append([]CreateOption{c.validation}, opts...)...) +} + +func (c *clientWithFieldValidation) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return c.client.Update(ctx, obj, append([]UpdateOption{c.validation}, opts...)...) 
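
Usage sketch for the scale subresource support the fake client gains above: the Deployment fixture and all names are assumptions for illustration only. Get fills an autoscalingv1.Scale from spec/status, and Update applies the scale body back to spec.replicas.

package example

import (
	"context"
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func TestScaleSubresource(t *testing.T) {
	deploy := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"},
		Spec:       appsv1.DeploymentSpec{Replicas: ptr.To[int32](1)},
	}
	c := fake.NewClientBuilder().WithObjects(deploy).Build()

	// Read the scale subresource the way a real client would.
	scale := &autoscalingv1.Scale{}
	if err := c.SubResource("scale").Get(context.Background(), deploy, scale); err != nil {
		t.Fatal(err)
	}

	// Scale up through the subresource; the fake client writes it to spec.replicas.
	scale.Spec.Replicas = 3
	if err := c.SubResource("scale").Update(context.Background(), deploy, client.WithSubResourceBody(scale)); err != nil {
		t.Fatal(err)
	}
}
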
+} + +func (c *clientWithFieldValidation) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return c.client.Patch(ctx, obj, patch, append([]PatchOption{c.validation}, opts...)...) +} + +func (c *clientWithFieldValidation) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return c.client.Delete(ctx, obj, opts...) +} + +func (c *clientWithFieldValidation) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, obj, opts...) +} + +func (c *clientWithFieldValidation) Scheme() *runtime.Scheme { return c.client.Scheme() } +func (c *clientWithFieldValidation) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() } +func (c *clientWithFieldValidation) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +func (c *clientWithFieldValidation) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + +func (c *clientWithFieldValidation) Status() StatusWriter { + return &subresourceClientWithFieldValidation{ + validation: c.validation, + subresourceWriter: c.client.Status(), + } +} + +func (c *clientWithFieldValidation) SubResource(subresource string) SubResourceClient { + srClient := c.client.SubResource(subresource) + return &subresourceClientWithFieldValidation{ + validation: c.validation, + subresourceWriter: srClient, + SubResourceReader: srClient, + } +} + +type subresourceClientWithFieldValidation struct { + validation FieldValidation + subresourceWriter SubResourceWriter + SubResourceReader +} + +func (c *subresourceClientWithFieldValidation) Create(ctx context.Context, obj Object, subresource Object, opts ...SubResourceCreateOption) error { + return c.subresourceWriter.Create(ctx, obj, subresource, append([]SubResourceCreateOption{c.validation}, opts...)...) +} + +func (c *subresourceClientWithFieldValidation) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + return c.subresourceWriter.Update(ctx, obj, append([]SubResourceUpdateOption{c.validation}, opts...)...) +} + +func (c *subresourceClientWithFieldValidation) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + return c.subresourceWriter.Patch(ctx, obj, patch, append([]SubResourcePatchOption{c.validation}, opts...)...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index 798506f486..db50ed8feb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -169,6 +169,39 @@ func (f FieldOwner) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) { opts.FieldManager = string(f) } +// FieldValidation configures field validation for the given requests. +type FieldValidation string + +// ApplyToPatch applies this configuration to the given patch options. +func (f FieldValidation) ApplyToPatch(opts *PatchOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToCreate applies this configuration to the given create options. +func (f FieldValidation) ApplyToCreate(opts *CreateOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToUpdate applies this configuration to the given update options. 
+func (f FieldValidation) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToSubResourcePatch applies this configuration to the given patch options. +func (f FieldValidation) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToSubResourceCreate applies this configuration to the given create options. +func (f FieldValidation) ApplyToSubResourceCreate(opts *SubResourceCreateOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToSubResourceUpdate applies this configuration to the given update options. +func (f FieldValidation) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) { + opts.FieldValidation = string(f) +} + // }}} // {{{ Create Options @@ -187,6 +220,24 @@ type CreateOptions struct { // this request. It must be set with server-side apply. FieldManager string + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. + FieldValidation string + // Raw represents raw CreateOptions, as passed to the API server. Raw *metav1.CreateOptions } @@ -203,6 +254,7 @@ func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { o.Raw.DryRun = o.DryRun o.Raw.FieldManager = o.FieldManager + o.Raw.FieldValidation = o.FieldValidation return o.Raw } @@ -223,6 +275,9 @@ func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { if o.FieldManager != "" { co.FieldManager = o.FieldManager } + if o.FieldValidation != "" { + co.FieldValidation = o.FieldValidation + } if o.Raw != nil { co.Raw = o.Raw } @@ -679,6 +734,24 @@ type UpdateOptions struct { // this request. It must be set with server-side apply. FieldManager string + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. 
The error returned from the server + // will contain all unknown and duplicate fields encountered. + FieldValidation string + // Raw represents raw UpdateOptions, as passed to the API server. Raw *metav1.UpdateOptions } @@ -695,6 +768,7 @@ func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { o.Raw.DryRun = o.DryRun o.Raw.FieldManager = o.FieldManager + o.Raw.FieldValidation = o.FieldValidation return o.Raw } @@ -717,6 +791,9 @@ func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { if o.FieldManager != "" { uo.FieldManager = o.FieldManager } + if o.FieldValidation != "" { + uo.FieldValidation = o.FieldValidation + } if o.Raw != nil { uo.Raw = o.Raw } @@ -745,6 +822,24 @@ type PatchOptions struct { // this request. It must be set with server-side apply. FieldManager string + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. + FieldValidation string + // Raw represents raw PatchOptions, as passed to the API server. Raw *metav1.PatchOptions } @@ -771,6 +866,7 @@ func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { o.Raw.DryRun = o.DryRun o.Raw.Force = o.Force o.Raw.FieldManager = o.FieldManager + o.Raw.FieldValidation = o.FieldValidation return o.Raw } @@ -787,6 +883,9 @@ func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { if o.FieldManager != "" { po.FieldManager = o.FieldManager } + if o.FieldValidation != "" { + po.FieldValidation = o.FieldValidation + } if o.Raw != nil { po.Raw = o.Raw } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go index b37dffaeea..999ef07e21 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go @@ -20,6 +20,12 @@ import "time" // Controller contains configuration options for a controller. type Controller struct { + // SkipNameValidation allows skipping the name validation that ensures that every controller name is unique. + // Unique controller names are important to get unique metrics and logs for a controller. + // Can be overwritten for a controller via the SkipNameValidation setting on the controller. + // Defaults to false if SkipNameValidation setting on controller and Manager are unset. + SkipNameValidation *bool + // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation // allowed for that controller. // @@ -40,7 +46,8 @@ type Controller struct { CacheSyncTimeout time.Duration // RecoverPanic indicates whether the panic caused by reconcile should be recovered. 
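
Usage sketch for the new FieldValidation option above: it can be passed per request or applied client-wide through WithFieldValidation. The helper below and its names are illustrative, assuming metav1.FieldValidationStrict and an existing client.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func createStrict(ctx context.Context, c client.Client, cm *corev1.ConfigMap) error {
	// Per-request: ask the server to reject unknown or duplicate fields.
	if err := c.Create(ctx, cm, client.FieldValidation(metav1.FieldValidationStrict)); err != nil {
		return err
	}

	// Client-wide default: every write from strictClient sends Strict unless a
	// call overrides it with its own FieldValidation option.
	strictClient := client.WithFieldValidation(c, client.FieldValidation(metav1.FieldValidationStrict))
	return strictClient.Update(ctx, cm)
}
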
- // Defaults to the Controller.RecoverPanic setting from the Manager if unset. + // Can be overwritten for a controller via the RecoverPanic setting on the controller. + // Defaults to true if RecoverPanic setting on controller and Manager are unset. RecoverPanic *bool // NeedLeaderElection indicates whether the controller needs to use leader election. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index 5c9e48beae..f2496236db 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -27,13 +27,21 @@ import ( "sigs.k8s.io/controller-runtime/pkg/internal/controller" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) // Options are the arguments for creating a new Controller. -type Options struct { +type Options = TypedOptions[reconcile.Request] + +// TypedOptions are the arguments for creating a new Controller. +type TypedOptions[request comparable] struct { + // SkipNameValidation allows skipping the name validation that ensures that every controller name is unique. + // Unique controller names are important to get unique metrics and logs for a controller. + // Defaults to the Controller.SkipNameValidation setting from the Manager if unset. + // Defaults to false if Controller.SkipNameValidation setting from the Manager is also unset. + SkipNameValidation *bool + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. MaxConcurrentReconciles int @@ -43,6 +51,7 @@ type Options struct { // RecoverPanic indicates whether the panic caused by reconcile should be recovered. // Defaults to the Controller.RecoverPanic setting from the Manager if unset. + // Defaults to true if Controller.RecoverPanic setting from the Manager is also unset. RecoverPanic *bool // NeedLeaderElection indicates whether the controller needs to use leader election. @@ -50,12 +59,12 @@ type Options struct { NeedLeaderElection *bool // Reconciler reconciles an object - Reconciler reconcile.Reconciler + Reconciler reconcile.TypedReconciler[request] // RateLimiter is used to limit how frequently requests may be queued. // Defaults to MaxOfRateLimiter which has both overall and per-item rate limiting. // The overall is a token bucket and the per-item is exponential. - RateLimiter ratelimiter.RateLimiter + RateLimiter workqueue.TypedRateLimiter[request] // NewQueue constructs the queue for this controller once the controller is ready to start. // With NewQueue a custom queue implementation can be used, e.g. a priority queue to prioritize with which @@ -67,23 +76,26 @@ type Options struct { // // NOTE: LOW LEVEL PRIMITIVE! // Only use a custom NewQueue if you know what you are doing. - NewQueue func(controllerName string, rateLimiter ratelimiter.RateLimiter) workqueue.RateLimitingInterface + NewQueue func(controllerName string, rateLimiter workqueue.TypedRateLimiter[request]) workqueue.TypedRateLimitingInterface[request] // LogConstructor is used to construct a logger used for this controller and passed // to each reconciliation via the context field. - LogConstructor func(request *reconcile.Request) logr.Logger + LogConstructor func(request *request) logr.Logger } // Controller implements a Kubernetes API. 
A Controller manages a work queue fed reconcile.Requests // from source.Sources. Work is performed through the reconcile.Reconciler for each enqueued item. // Work typically is reads and writes Kubernetes objects to make the system state match the state specified // in the object Spec. -type Controller interface { +type Controller = TypedController[reconcile.Request] + +// TypedController implements an API. +type TypedController[request comparable] interface { // Reconciler is called to reconcile an object by Namespace/Name - reconcile.Reconciler + reconcile.TypedReconciler[request] // Watch watches the provided Source. - Watch(src source.Source) error + Watch(src source.TypedSource[request]) error // Start starts the controller. Start blocks until the context is closed or a // controller has an error starting. @@ -95,8 +107,17 @@ type Controller interface { // New returns a new Controller registered with the Manager. The Manager will ensure that shared Caches have // been synced before the Controller is Started. +// +// The name must be unique as it is used to identify the controller in metrics and logs. func New(name string, mgr manager.Manager, options Options) (Controller, error) { - c, err := NewUnmanaged(name, mgr, options) + return NewTyped(name, mgr, options) +} + +// NewTyped returns a new typed controller registered with the Manager, +// +// The name must be unique as it is used to identify the controller in metrics and logs. +func NewTyped[request comparable](name string, mgr manager.Manager, options TypedOptions[request]) (TypedController[request], error) { + c, err := NewTypedUnmanaged(name, mgr, options) if err != nil { return nil, err } @@ -107,7 +128,16 @@ func New(name string, mgr manager.Manager, options Options) (Controller, error) // NewUnmanaged returns a new controller without adding it to the manager. The // caller is responsible for starting the returned controller. +// +// The name must be unique as it is used to identify the controller in metrics and logs. func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller, error) { + return NewTypedUnmanaged(name, mgr, options) +} + +// NewTypedUnmanaged returns a new typed controller without adding it to the manager. +// +// The name must be unique as it is used to identify the controller in metrics and logs. 
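
Usage sketch for the controller name registry and typed options introduced above: reusing a controller name now returns an error unless SkipNameValidation is set, and the rate limiter moves to the typed workqueue API. The names below are illustrative.

package example

import (
	"k8s.io/client-go/util/workqueue"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func newControllers(mgr manager.Manager, r reconcile.Reconciler) error {
	// Names are registered globally; a second controller reusing "example" would
	// fail with an error unless name validation is skipped.
	if _, err := controller.New("example", mgr, controller.Options{
		Reconciler: r,
		// The rate limiter is the typed workqueue variant, parameterized on the request type.
		RateLimiter: workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](),
	}); err != nil {
		return err
	}

	// Opting out of the uniqueness check, e.g. in tests that rebuild controllers.
	_, err := controller.New("example", mgr, controller.Options{
		Reconciler:         r,
		SkipNameValidation: ptr.To(true),
	})
	return err
}
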
+func NewTypedUnmanaged[request comparable](name string, mgr manager.Manager, options TypedOptions[request]) (TypedController[request], error) { if options.Reconciler == nil { return nil, fmt.Errorf("must specify Reconciler") } @@ -116,13 +146,23 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller return nil, fmt.Errorf("must specify Name for Controller") } + if options.SkipNameValidation == nil { + options.SkipNameValidation = mgr.GetControllerOptions().SkipNameValidation + } + + if options.SkipNameValidation == nil || !*options.SkipNameValidation { + if err := checkName(name); err != nil { + return nil, err + } + } + if options.LogConstructor == nil { log := mgr.GetLogger().WithValues( "controller", name, ) - options.LogConstructor = func(req *reconcile.Request) logr.Logger { + options.LogConstructor = func(in *request) logr.Logger { log := log - if req != nil { + if req, ok := any(in).(*reconcile.Request); ok && req != nil { log = log.WithValues( "object", klog.KRef(req.Namespace, req.Name), "namespace", req.Namespace, "name", req.Name, @@ -149,12 +189,12 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller } if options.RateLimiter == nil { - options.RateLimiter = workqueue.DefaultControllerRateLimiter() + options.RateLimiter = workqueue.DefaultTypedControllerRateLimiter[request]() } if options.NewQueue == nil { - options.NewQueue = func(controllerName string, rateLimiter ratelimiter.RateLimiter) workqueue.RateLimitingInterface { - return workqueue.NewRateLimitingQueueWithConfig(rateLimiter, workqueue.RateLimitingQueueConfig{ + options.NewQueue = func(controllerName string, rateLimiter workqueue.TypedRateLimiter[request]) workqueue.TypedRateLimitingInterface[request] { + return workqueue.NewTypedRateLimitingQueueWithConfig(rateLimiter, workqueue.TypedRateLimitingQueueConfig[request]{ Name: controllerName, }) } @@ -169,7 +209,7 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller } // Create controller with dependencies set - return &controller.Controller{ + return &controller.Controller[request]{ Do: options.Reconciler, RateLimiter: options.RateLimiter, NewQueue: options.NewQueue, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/name.go similarity index 52% rename from vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/controller/name.go index 565a3a227f..0e71a01c66 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/name.go @@ -14,17 +14,30 @@ See the License for the specific language governing permissions and limitations under the License. */ -package ratelimiter - -import "time" - -// RateLimiter is an identical interface of client-go workqueue RateLimiter. -type RateLimiter interface { - // When gets an item and gets to decide how long that item should wait - When(item interface{}) time.Duration - // Forget indicates that an item is finished being retried. 
Doesn't matter whether its for perm failing - // or for success, we'll stop tracking it - Forget(item interface{}) - // NumRequeues returns back how many failures the item has had - NumRequeues(item interface{}) int +package controller + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" +) + +var nameLock sync.Mutex +var usedNames sets.Set[string] + +func checkName(name string) error { + nameLock.Lock() + defer nameLock.Unlock() + if usedNames == nil { + usedNames = sets.Set[string]{} + } + + if usedNames.Has(name) { + return fmt.Errorf("controller with name %s already exists. Controller names must be unique to avoid multiple controllers reporting to the same metric", name) + } + + usedNames.Insert(name) + + return nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go b/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go index e99c210072..81229fc2d3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go @@ -37,26 +37,26 @@ type GenericEvent = TypedGenericEvent[client.Object] // TypedCreateEvent is an event where a Kubernetes object was created. TypedCreateEvent should be generated // by a source.Source and transformed into a reconcile.Request by an handler.TypedEventHandler. -type TypedCreateEvent[T any] struct { +type TypedCreateEvent[object any] struct { // Object is the object from the event - Object T + Object object } // TypedUpdateEvent is an event where a Kubernetes object was updated. TypedUpdateEvent should be generated // by a source.Source and transformed into a reconcile.Request by an handler.TypedEventHandler. -type TypedUpdateEvent[T any] struct { +type TypedUpdateEvent[object any] struct { // ObjectOld is the object from the event - ObjectOld T + ObjectOld object // ObjectNew is the object from the event - ObjectNew T + ObjectNew object } // TypedDeleteEvent is an event where a Kubernetes object was deleted. TypedDeleteEvent should be generated // by a source.Source and transformed into a reconcile.Request by an handler.TypedEventHandler. -type TypedDeleteEvent[T any] struct { +type TypedDeleteEvent[object any] struct { // Object is the object from the event - Object T + Object object // DeleteStateUnknown is true if the Delete event was missed but we identified the object // as having been deleted. @@ -66,7 +66,7 @@ type TypedDeleteEvent[T any] struct { // TypedGenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster). // TypedGenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an // handler.TypedEventHandler. -type TypedGenericEvent[T any] struct { +type TypedGenericEvent[object any] struct { // Object is the object from the event - Object T + Object object } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go index c9c7693854..1a1d1ab2f4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go @@ -44,10 +44,10 @@ type EnqueueRequestForObject = TypedEnqueueRequestForObject[client.Object] // Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource. // // TypedEnqueueRequestForObject is experimental and subject to future change. 
-type TypedEnqueueRequestForObject[T client.Object] struct{} +type TypedEnqueueRequestForObject[object client.Object] struct{} // Create implements EventHandler. -func (e *TypedEnqueueRequestForObject[T]) Create(ctx context.Context, evt event.TypedCreateEvent[T], q workqueue.RateLimitingInterface) { +func (e *TypedEnqueueRequestForObject[T]) Create(ctx context.Context, evt event.TypedCreateEvent[T], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { if isNil(evt.Object) { enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) return @@ -59,7 +59,7 @@ func (e *TypedEnqueueRequestForObject[T]) Create(ctx context.Context, evt event. } // Update implements EventHandler. -func (e *TypedEnqueueRequestForObject[T]) Update(ctx context.Context, evt event.TypedUpdateEvent[T], q workqueue.RateLimitingInterface) { +func (e *TypedEnqueueRequestForObject[T]) Update(ctx context.Context, evt event.TypedUpdateEvent[T], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { switch { case !isNil(evt.ObjectNew): q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ @@ -77,7 +77,7 @@ func (e *TypedEnqueueRequestForObject[T]) Update(ctx context.Context, evt event. } // Delete implements EventHandler. -func (e *TypedEnqueueRequestForObject[T]) Delete(ctx context.Context, evt event.TypedDeleteEvent[T], q workqueue.RateLimitingInterface) { +func (e *TypedEnqueueRequestForObject[T]) Delete(ctx context.Context, evt event.TypedDeleteEvent[T], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { if isNil(evt.Object) { enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) return @@ -89,7 +89,7 @@ func (e *TypedEnqueueRequestForObject[T]) Delete(ctx context.Context, evt event. } // Generic implements EventHandler. -func (e *TypedEnqueueRequestForObject[T]) Generic(ctx context.Context, evt event.TypedGenericEvent[T], q workqueue.RateLimitingInterface) { +func (e *TypedEnqueueRequestForObject[T]) Generic(ctx context.Context, evt event.TypedGenericEvent[T], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { if isNil(evt.Object) { enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) return diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go index 6e34e2ae45..491bc40c42 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go @@ -27,13 +27,13 @@ import ( // MapFunc is the signature required for enqueueing requests from a generic function. // This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler. -type MapFunc = TypedMapFunc[client.Object] +type MapFunc = TypedMapFunc[client.Object, reconcile.Request] // TypedMapFunc is the signature required for enqueueing requests from a generic function. // This type is usually used with EnqueueRequestsFromTypedMapFunc when registering an event handler. // // TypedMapFunc is experimental and subject to future change. -type TypedMapFunc[T any] func(context.Context, T) []reconcile.Request +type TypedMapFunc[object any, request comparable] func(context.Context, object) []request // EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection // of reconcile.Requests on each Event. 
The reconcile.Requests may be for an arbitrary set of objects @@ -61,46 +61,62 @@ func EnqueueRequestsFromMapFunc(fn MapFunc) EventHandler { // objects and both sets of Requests are enqueue. // // TypedEnqueueRequestsFromMapFunc is experimental and subject to future change. -func TypedEnqueueRequestsFromMapFunc[T any](fn TypedMapFunc[T]) TypedEventHandler[T] { - return &enqueueRequestsFromMapFunc[T]{ +func TypedEnqueueRequestsFromMapFunc[object any, request comparable](fn TypedMapFunc[object, request]) TypedEventHandler[object, request] { + return &enqueueRequestsFromMapFunc[object, request]{ toRequests: fn, } } -var _ EventHandler = &enqueueRequestsFromMapFunc[client.Object]{} +var _ EventHandler = &enqueueRequestsFromMapFunc[client.Object, reconcile.Request]{} -type enqueueRequestsFromMapFunc[T any] struct { +type enqueueRequestsFromMapFunc[object any, request comparable] struct { // Mapper transforms the argument into a slice of keys to be reconciled - toRequests TypedMapFunc[T] + toRequests TypedMapFunc[object, request] } // Create implements EventHandler. -func (e *enqueueRequestsFromMapFunc[T]) Create(ctx context.Context, evt event.TypedCreateEvent[T], q workqueue.RateLimitingInterface) { - reqs := map[reconcile.Request]empty{} +func (e *enqueueRequestsFromMapFunc[object, request]) Create( + ctx context.Context, + evt event.TypedCreateEvent[object], + q workqueue.TypedRateLimitingInterface[request], +) { + reqs := map[request]empty{} e.mapAndEnqueue(ctx, q, evt.Object, reqs) } // Update implements EventHandler. -func (e *enqueueRequestsFromMapFunc[T]) Update(ctx context.Context, evt event.TypedUpdateEvent[T], q workqueue.RateLimitingInterface) { - reqs := map[reconcile.Request]empty{} +func (e *enqueueRequestsFromMapFunc[object, request]) Update( + ctx context.Context, + evt event.TypedUpdateEvent[object], + q workqueue.TypedRateLimitingInterface[request], +) { + reqs := map[request]empty{} e.mapAndEnqueue(ctx, q, evt.ObjectOld, reqs) e.mapAndEnqueue(ctx, q, evt.ObjectNew, reqs) } // Delete implements EventHandler. -func (e *enqueueRequestsFromMapFunc[T]) Delete(ctx context.Context, evt event.TypedDeleteEvent[T], q workqueue.RateLimitingInterface) { - reqs := map[reconcile.Request]empty{} +func (e *enqueueRequestsFromMapFunc[object, request]) Delete( + ctx context.Context, + evt event.TypedDeleteEvent[object], + q workqueue.TypedRateLimitingInterface[request], +) { + reqs := map[request]empty{} e.mapAndEnqueue(ctx, q, evt.Object, reqs) } // Generic implements EventHandler. 
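TypedMapFunc now carries both the watched object type and the request type, so a mapping handler can emit something other than reconcile.Request. A sketch based on the signatures in this hunk; the clusterRequest type and the "cluster" label key are made up, and mgr is an assumed manager.Manager:

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// clusterRequest is a hypothetical request type; any comparable type works.
type clusterRequest struct{ Cluster string }

func configMapSource(mgr ctrl.Manager) source.TypedSyncingSource[clusterRequest] {
	mapFn := handler.TypedEnqueueRequestsFromMapFunc(
		func(ctx context.Context, cm *corev1.ConfigMap) []clusterRequest {
			return []clusterRequest{{Cluster: cm.Labels["cluster"]}}
		})
	// The handler can only feed a workqueue of clusterRequest items, for
	// example through the TypedKind source further down in this diff.
	return source.TypedKind(mgr.GetCache(), &corev1.ConfigMap{}, mapFn)
}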
-func (e *enqueueRequestsFromMapFunc[T]) Generic(ctx context.Context, evt event.TypedGenericEvent[T], q workqueue.RateLimitingInterface) { - reqs := map[reconcile.Request]empty{} +func (e *enqueueRequestsFromMapFunc[object, request]) Generic( + ctx context.Context, + evt event.TypedGenericEvent[object], + q workqueue.TypedRateLimitingInterface[request], +) { + reqs := map[request]empty{} e.mapAndEnqueue(ctx, q, evt.Object, reqs) } -func (e *enqueueRequestsFromMapFunc[T]) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface, object T, reqs map[reconcile.Request]empty) { - for _, req := range e.toRequests(ctx, object) { +func (e *enqueueRequestsFromMapFunc[object, request]) mapAndEnqueue(ctx context.Context, q workqueue.TypedRateLimitingInterface[request], o object, reqs map[request]empty) { + for _, req := range e.toRequests(ctx, o) { _, ok := reqs[req] if !ok { q.Add(req) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go index 052a3140e1..1680043b46 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go @@ -61,8 +61,8 @@ func EnqueueRequestForOwner(scheme *runtime.Scheme, mapper meta.RESTMapper, owne // - a handler.typedEnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and OnlyControllerOwner set to true. // // TypedEnqueueRequestForOwner is experimental and subject to future change. -func TypedEnqueueRequestForOwner[T client.Object](scheme *runtime.Scheme, mapper meta.RESTMapper, ownerType client.Object, opts ...OwnerOption) TypedEventHandler[T] { - e := &enqueueRequestForOwner[T]{ +func TypedEnqueueRequestForOwner[object client.Object](scheme *runtime.Scheme, mapper meta.RESTMapper, ownerType client.Object, opts ...OwnerOption) TypedEventHandler[object, reconcile.Request] { + e := &enqueueRequestForOwner[object]{ ownerType: ownerType, mapper: mapper, } @@ -86,7 +86,7 @@ type enqueueRequestForOwnerInterface interface { setIsController(bool) } -type enqueueRequestForOwner[T client.Object] struct { +type enqueueRequestForOwner[object client.Object] struct { // ownerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. ownerType runtime.Object @@ -100,12 +100,12 @@ type enqueueRequestForOwner[T client.Object] struct { mapper meta.RESTMapper } -func (e *enqueueRequestForOwner[T]) setIsController(isController bool) { +func (e *enqueueRequestForOwner[object]) setIsController(isController bool) { e.isController = isController } // Create implements EventHandler. -func (e *enqueueRequestForOwner[T]) Create(ctx context.Context, evt event.TypedCreateEvent[T], q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner[object]) Create(ctx context.Context, evt event.TypedCreateEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -114,7 +114,7 @@ func (e *enqueueRequestForOwner[T]) Create(ctx context.Context, evt event.TypedC } // Update implements EventHandler. 
-func (e *enqueueRequestForOwner[T]) Update(ctx context.Context, evt event.TypedUpdateEvent[T], q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner[object]) Update(ctx context.Context, evt event.TypedUpdateEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.ObjectOld, reqs) e.getOwnerReconcileRequest(evt.ObjectNew, reqs) @@ -124,7 +124,7 @@ func (e *enqueueRequestForOwner[T]) Update(ctx context.Context, evt event.TypedU } // Delete implements EventHandler. -func (e *enqueueRequestForOwner[T]) Delete(ctx context.Context, evt event.TypedDeleteEvent[T], q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner[object]) Delete(ctx context.Context, evt event.TypedDeleteEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -133,7 +133,7 @@ func (e *enqueueRequestForOwner[T]) Delete(ctx context.Context, evt event.TypedD } // Generic implements EventHandler. -func (e *enqueueRequestForOwner[T]) Generic(ctx context.Context, evt event.TypedGenericEvent[T], q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner[object]) Generic(ctx context.Context, evt event.TypedGenericEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -143,7 +143,7 @@ func (e *enqueueRequestForOwner[T]) Generic(ctx context.Context, evt event.Typed // parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns false // if the OwnerType could not be parsed using the scheme. -func (e *enqueueRequestForOwner[T]) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { +func (e *enqueueRequestForOwner[object]) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { // Get the kinds of the type kinds, _, err := scheme.ObjectKinds(e.ownerType) if err != nil { @@ -163,10 +163,10 @@ func (e *enqueueRequestForOwner[T]) parseOwnerTypeGroupKind(scheme *runtime.Sche // getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile // owners of object that match e.OwnerType. 
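TypedEnqueueRequestForOwner keeps reconcile.Request as its request type but is now generic over the watched object. A sketch of enqueuing ReplicaSet owners for Pod events, assuming an existing manager mgr and controller c:

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

func watchPodsOwnedByReplicaSets(mgr ctrl.Manager, c controller.Controller) error {
	h := handler.TypedEnqueueRequestForOwner[*corev1.Pod](
		mgr.GetScheme(), mgr.GetRESTMapper(),
		&appsv1.ReplicaSet{},          // owner type resolved from OwnerReferences
		handler.OnlyControllerOwner(), // only the controller=true reference
	)
	return c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, h))
}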
-func (e *enqueueRequestForOwner[T]) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { +func (e *enqueueRequestForOwner[object]) getOwnerReconcileRequest(obj metav1.Object, result map[reconcile.Request]empty) { // Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested // by the user - for _, ref := range e.getOwnersReferences(object) { + for _, ref := range e.getOwnersReferences(obj) { // Parse the Group out of the OwnerReference to compare it to what was parsed out of the requested OwnerType refGV, err := schema.ParseGroupVersion(ref.APIVersion) if err != nil { @@ -192,7 +192,7 @@ func (e *enqueueRequestForOwner[T]) getOwnerReconcileRequest(object metav1.Objec return } if mapping.Scope.Name() != meta.RESTScopeNameRoot { - request.Namespace = object.GetNamespace() + request.Namespace = obj.GetNamespace() } result[request] = empty{} @@ -203,17 +203,17 @@ func (e *enqueueRequestForOwner[T]) getOwnerReconcileRequest(object metav1.Objec // getOwnersReferences returns the OwnerReferences for an object as specified by the enqueueRequestForOwner // - if IsController is true: only take the Controller OwnerReference (if found) // - if IsController is false: take all OwnerReferences. -func (e *enqueueRequestForOwner[T]) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { - if object == nil { +func (e *enqueueRequestForOwner[object]) getOwnersReferences(obj metav1.Object) []metav1.OwnerReference { + if obj == nil { return nil } // If not filtered as Controller only, then use all the OwnerReferences if !e.isController { - return object.GetOwnerReferences() + return obj.GetOwnerReferences() } // If filtered to a Controller, only take the Controller OwnerReference - if ownerRef := metav1.GetControllerOf(object); ownerRef != nil { + if ownerRef := metav1.GetControllerOf(obj); ownerRef != nil { return []metav1.OwnerReference{*ownerRef} } // No Controller OwnerReference found diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go index 1756ffefa3..ea4bcee31e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -22,6 +22,7 @@ import ( "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // EventHandler enqueues reconcile.Requests in response to events (e.g. Pod Create). EventHandlers map an Event @@ -42,7 +43,7 @@ import ( // // Unless you are implementing your own EventHandler, you can ignore the functions on the EventHandler interface. // Most users shouldn't need to implement their own EventHandler. -type EventHandler TypedEventHandler[client.Object] +type EventHandler = TypedEventHandler[client.Object, reconcile.Request] // TypedEventHandler enqueues reconcile.Requests in response to events (e.g. Pod Create). TypedEventHandlers map an Event // for one object to trigger Reconciles for either the same object or different objects - e.g. if there is an @@ -64,70 +65,70 @@ type EventHandler TypedEventHandler[client.Object] // Most users shouldn't need to implement their own TypedEventHandler. // // TypedEventHandler is experimental and subject to future change. 
-type TypedEventHandler[T any] interface { +type TypedEventHandler[object any, request comparable] interface { // Create is called in response to a create event - e.g. Pod Creation. - Create(context.Context, event.TypedCreateEvent[T], workqueue.RateLimitingInterface) + Create(context.Context, event.TypedCreateEvent[object], workqueue.TypedRateLimitingInterface[request]) // Update is called in response to an update event - e.g. Pod Updated. - Update(context.Context, event.TypedUpdateEvent[T], workqueue.RateLimitingInterface) + Update(context.Context, event.TypedUpdateEvent[object], workqueue.TypedRateLimitingInterface[request]) // Delete is called in response to a delete event - e.g. Pod Deleted. - Delete(context.Context, event.TypedDeleteEvent[T], workqueue.RateLimitingInterface) + Delete(context.Context, event.TypedDeleteEvent[object], workqueue.TypedRateLimitingInterface[request]) // Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or // external trigger request - e.g. reconcile Autoscaling, or a Webhook. - Generic(context.Context, event.TypedGenericEvent[T], workqueue.RateLimitingInterface) + Generic(context.Context, event.TypedGenericEvent[object], workqueue.TypedRateLimitingInterface[request]) } var _ EventHandler = Funcs{} // Funcs implements eventhandler. -type Funcs = TypedFuncs[client.Object] +type Funcs = TypedFuncs[client.Object, reconcile.Request] // TypedFuncs implements eventhandler. // // TypedFuncs is experimental and subject to future change. -type TypedFuncs[T any] struct { +type TypedFuncs[object any, request comparable] struct { // Create is called in response to an add event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - CreateFunc func(context.Context, event.TypedCreateEvent[T], workqueue.RateLimitingInterface) + CreateFunc func(context.Context, event.TypedCreateEvent[object], workqueue.TypedRateLimitingInterface[request]) // Update is called in response to an update event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - UpdateFunc func(context.Context, event.TypedUpdateEvent[T], workqueue.RateLimitingInterface) + UpdateFunc func(context.Context, event.TypedUpdateEvent[object], workqueue.TypedRateLimitingInterface[request]) // Delete is called in response to a delete event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - DeleteFunc func(context.Context, event.TypedDeleteEvent[T], workqueue.RateLimitingInterface) + DeleteFunc func(context.Context, event.TypedDeleteEvent[object], workqueue.TypedRateLimitingInterface[request]) // GenericFunc is called in response to a generic event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - GenericFunc func(context.Context, event.TypedGenericEvent[T], workqueue.RateLimitingInterface) + GenericFunc func(context.Context, event.TypedGenericEvent[object], workqueue.TypedRateLimitingInterface[request]) } // Create implements EventHandler. -func (h TypedFuncs[T]) Create(ctx context.Context, e event.TypedCreateEvent[T], q workqueue.RateLimitingInterface) { +func (h TypedFuncs[object, request]) Create(ctx context.Context, e event.TypedCreateEvent[object], q workqueue.TypedRateLimitingInterface[request]) { if h.CreateFunc != nil { h.CreateFunc(ctx, e, q) } } // Delete implements EventHandler. 
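Because TypedEventHandler and TypedFuncs now take an object and a request parameter, and the queue argument is a typed workqueue, handler callbacks add requests without going through interface{}. A sketch for Pods with the classic reconcile.Request, assuming an existing manager mgr and controller c:

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

func watchPodCreates(mgr ctrl.Manager, c controller.Controller) error {
	podFuncs := handler.TypedFuncs[*corev1.Pod, reconcile.Request]{
		CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*corev1.Pod],
			q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
			// The queue is typed, so the request is added directly.
			q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
				Namespace: e.Object.Namespace,
				Name:      e.Object.Name,
			}})
		},
		// UpdateFunc, DeleteFunc and GenericFunc default to no-ops when nil.
	}
	return c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, podFuncs))
}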
-func (h TypedFuncs[T]) Delete(ctx context.Context, e event.TypedDeleteEvent[T], q workqueue.RateLimitingInterface) { +func (h TypedFuncs[object, request]) Delete(ctx context.Context, e event.TypedDeleteEvent[object], q workqueue.TypedRateLimitingInterface[request]) { if h.DeleteFunc != nil { h.DeleteFunc(ctx, e, q) } } // Update implements EventHandler. -func (h TypedFuncs[T]) Update(ctx context.Context, e event.TypedUpdateEvent[T], q workqueue.RateLimitingInterface) { +func (h TypedFuncs[object, request]) Update(ctx context.Context, e event.TypedUpdateEvent[object], q workqueue.TypedRateLimitingInterface[request]) { if h.UpdateFunc != nil { h.UpdateFunc(ctx, e, q) } } // Generic implements EventHandler. -func (h TypedFuncs[T]) Generic(ctx context.Context, e event.TypedGenericEvent[T], q workqueue.RateLimitingInterface) { +func (h TypedFuncs[object, request]) Generic(ctx context.Context, e event.TypedGenericEvent[object], q workqueue.TypedRateLimitingInterface[request]) { if h.GenericFunc != nil { h.GenericFunc(ctx, e, q) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index 9c709404b5..dfe407f3b8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -31,13 +31,12 @@ import ( ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) // Controller implements controller.Controller. -type Controller struct { +type Controller[request comparable] struct { // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. Name string @@ -47,19 +46,19 @@ type Controller struct { // Reconciler is a function that can be called at any time with the Name / Namespace of an object and // ensures that the state of the system matches the state specified in the object. // Defaults to the DefaultReconcileFunc. - Do reconcile.Reconciler + Do reconcile.TypedReconciler[request] // RateLimiter is used to limit how frequently requests may be queued into the work queue. - RateLimiter ratelimiter.RateLimiter + RateLimiter workqueue.TypedRateLimiter[request] // NewQueue constructs the queue for this controller once the controller is ready to start. // This is a func because the standard Kubernetes work queues start themselves immediately, which // leads to goroutine leaks if something calls controller.New repeatedly. - NewQueue func(controllerName string, rateLimiter ratelimiter.RateLimiter) workqueue.RateLimitingInterface + NewQueue func(controllerName string, rateLimiter workqueue.TypedRateLimiter[request]) workqueue.TypedRateLimitingInterface[request] // Queue is an listeningQueue that listens for events from Informers and adds object keys to // the Queue for processing - Queue workqueue.RateLimitingInterface + Queue workqueue.TypedRateLimitingInterface[request] // mu is used to synchronize Controller setup mu sync.Mutex @@ -79,15 +78,16 @@ type Controller struct { CacheSyncTimeout time.Duration // startWatches maintains a list of sources, handlers, and predicates to start when the controller is started. 
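The Do, RateLimiter, NewQueue and Queue fields above are now generic over the request type, mirroring the typed workqueue API in client-go. A standalone sketch of the queue the controller builds by default (the queue name "demo" is arbitrary); Get returns the request value directly, which is why reconcileHandler further down no longer needs a type assertion:

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func typedQueueRoundTrip() {
	rl := workqueue.DefaultTypedControllerRateLimiter[reconcile.Request]()
	q := workqueue.NewTypedRateLimitingQueueWithConfig(rl,
		workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{Name: "demo"})
	defer q.ShutDown()

	q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
		Namespace: "default", Name: "example",
	}})

	req, shutdown := q.Get() // req is a reconcile.Request, not interface{}
	if shutdown {
		return
	}
	q.Forget(req)
	q.Done(req)
}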
- startWatches []source.Source + startWatches []source.TypedSource[request] // LogConstructor is used to construct a logger to then log messages to users during reconciliation, // or for example when a watch is started. // Note: LogConstructor has to be able to handle nil requests as we are also using it // outside the context of a reconciliation. - LogConstructor func(request *reconcile.Request) logr.Logger + LogConstructor func(request *request) logr.Logger // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to true. RecoverPanic *bool // LeaderElected indicates whether the controller is leader elected or always running. @@ -95,12 +95,14 @@ type Controller struct { } // Reconcile implements reconcile.Reconciler. -func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { +func (c *Controller[request]) Reconcile(ctx context.Context, req request) (_ reconcile.Result, err error) { defer func() { if r := recover(); r != nil { - if c.RecoverPanic != nil && *c.RecoverPanic { + ctrlmetrics.ReconcilePanics.WithLabelValues(c.Name).Inc() + + if c.RecoverPanic == nil || *c.RecoverPanic { for _, fn := range utilruntime.PanicHandlers { - fn(r) + fn(ctx, r) } err = fmt.Errorf("panic: %v [recovered]", r) return @@ -115,7 +117,7 @@ func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ re } // Watch implements controller.Controller. -func (c *Controller) Watch(src source.Source) error { +func (c *Controller[request]) Watch(src source.TypedSource[request]) error { c.mu.Lock() defer c.mu.Unlock() @@ -132,7 +134,7 @@ func (c *Controller) Watch(src source.Source) error { } // NeedLeaderElection implements the manager.LeaderElectionRunnable interface. -func (c *Controller) NeedLeaderElection() bool { +func (c *Controller[request]) NeedLeaderElection() bool { if c.LeaderElected == nil { return true } @@ -140,7 +142,7 @@ func (c *Controller) NeedLeaderElection() bool { } // Start implements controller.Controller. -func (c *Controller) Start(ctx context.Context) error { +func (c *Controller[request]) Start(ctx context.Context) error { // use an IIFE to get proper lock handling // but lock outside to get proper handling of the queue shutdown c.mu.Lock() @@ -240,7 +242,7 @@ func (c *Controller) Start(ctx context.Context) error { // processNextWorkItem will read a single work item off the workqueue and // attempt to process it, by calling the reconcileHandler. 
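The recover branch now treats a nil RecoverPanic as true, so panics in Reconcile are recovered (and counted by the new metric below) by default. A sketch of restoring the previous crash-on-panic behaviour through the public options, assuming a builder-based setup with mgr and reconciler r:

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/utils/ptr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func addDeploymentController(mgr ctrl.Manager, r reconcile.Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appsv1.Deployment{}).
		WithOptions(controller.Options{
			// nil now means "recover"; setting false restores the previous
			// crash-on-panic behaviour.
			RecoverPanic: ptr.To(false),
		}).
		Complete(r)
}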
-func (c *Controller) processNextWorkItem(ctx context.Context) bool { +func (c *Controller[request]) processNextWorkItem(ctx context.Context) bool { obj, shutdown := c.Queue.Get() if shutdown { // Stop working @@ -269,35 +271,25 @@ const ( labelSuccess = "success" ) -func (c *Controller) initMetrics() { - ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Set(0) - ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Add(0) +func (c *Controller[request]) initMetrics() { ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Add(0) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Add(0) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Add(0) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Add(0) + ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Add(0) + ctrlmetrics.TerminalReconcileErrors.WithLabelValues(c.Name).Add(0) + ctrlmetrics.ReconcilePanics.WithLabelValues(c.Name).Add(0) ctrlmetrics.WorkerCount.WithLabelValues(c.Name).Set(float64(c.MaxConcurrentReconciles)) + ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Set(0) } -func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { +func (c *Controller[request]) reconcileHandler(ctx context.Context, req request) { // Update metrics after processing each item reconcileStartTS := time.Now() defer func() { c.updateMetrics(time.Since(reconcileStartTS)) }() - // Make sure that the object is a valid request. - req, ok := obj.(reconcile.Request) - if !ok { - // As the item in the workqueue is actually invalid, we call - // Forget here else we'd go into a loop of attempting to - // process a work item that is invalid. - c.Queue.Forget(obj) - c.LogConstructor(nil).Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj) - // Return true, don't take a break - return - } - log := c.LogConstructor(&req) reconcileID := uuid.NewUUID() @@ -328,7 +320,7 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { // along with a non-nil error. But this is intended as // We need to drive to stable reconcile loops before queuing due // to result.RequestAfter - c.Queue.Forget(obj) + c.Queue.Forget(req) c.Queue.AddAfter(req, result.RequeueAfter) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc() case result.Requeue: @@ -339,18 +331,18 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { log.V(5).Info("Reconcile successful") // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. - c.Queue.Forget(obj) + c.Queue.Forget(req) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc() } } // GetLogger returns this controller's logger. -func (c *Controller) GetLogger() logr.Logger { +func (c *Controller[request]) GetLogger() logr.Logger { return c.LogConstructor(nil) } // updateMetrics updates prometheus metrics within the controller. 
-func (c *Controller) updateMetrics(reconcileTime time.Duration) { +func (c *Controller[request]) updateMetrics(reconcileTime time.Duration) { ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go index b74ce062be..fbf15669d5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go @@ -46,6 +46,13 @@ var ( Help: "Total number of terminal reconciliation errors per controller", }, []string{"controller"}) + // ReconcilePanics is a prometheus counter metrics which holds the total + // number of panics from the Reconciler. + ReconcilePanics = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_reconcile_panics_total", + Help: "Total number of reconciliation panics per controller", + }, []string{"controller"}) + // ReconcileTime is a prometheus metric which keeps track of the duration // of reconciliations. ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ @@ -75,6 +82,7 @@ func init() { ReconcileTotal, ReconcileErrors, TerminalReconcileErrors, + ReconcilePanics, ReconcileTime, WorkerCount, ActiveWorkers, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go index 8651ea453e..38432a1a79 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go @@ -33,8 +33,12 @@ import ( var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") // NewEventHandler creates a new EventHandler. -func NewEventHandler[T client.Object](ctx context.Context, queue workqueue.RateLimitingInterface, handler handler.TypedEventHandler[T], predicates []predicate.TypedPredicate[T]) *EventHandler[T] { - return &EventHandler[T]{ +func NewEventHandler[object client.Object, request comparable]( + ctx context.Context, + queue workqueue.TypedRateLimitingInterface[request], + handler handler.TypedEventHandler[object, request], + predicates []predicate.TypedPredicate[object]) *EventHandler[object, request] { + return &EventHandler[object, request]{ ctx: ctx, handler: handler, queue: queue, @@ -43,19 +47,19 @@ func NewEventHandler[T client.Object](ctx context.Context, queue workqueue.RateL } // EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface. -type EventHandler[T client.Object] struct { +type EventHandler[object client.Object, request comparable] struct { // ctx stores the context that created the event handler // that is used to propagate cancellation signals to each handler function. 
ctx context.Context - handler handler.TypedEventHandler[T] - queue workqueue.RateLimitingInterface - predicates []predicate.TypedPredicate[T] + handler handler.TypedEventHandler[object, request] + queue workqueue.TypedRateLimitingInterface[request] + predicates []predicate.TypedPredicate[object] } // HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs // TODO: switch to ResourceEventHandlerDetailedFuncs with client-go 1.27 -func (e *EventHandler[T]) HandlerFuncs() cache.ResourceEventHandlerFuncs { +func (e *EventHandler[object, request]) HandlerFuncs() cache.ResourceEventHandlerFuncs { return cache.ResourceEventHandlerFuncs{ AddFunc: e.OnAdd, UpdateFunc: e.OnUpdate, @@ -64,11 +68,11 @@ func (e *EventHandler[T]) HandlerFuncs() cache.ResourceEventHandlerFuncs { } // OnAdd creates CreateEvent and calls Create on EventHandler. -func (e *EventHandler[T]) OnAdd(obj interface{}) { - c := event.TypedCreateEvent[T]{} +func (e *EventHandler[object, request]) OnAdd(obj interface{}) { + c := event.TypedCreateEvent[object]{} // Pull Object out of the object - if o, ok := obj.(T); ok { + if o, ok := obj.(object); ok { c.Object = o } else { log.Error(nil, "OnAdd missing Object", @@ -89,10 +93,10 @@ func (e *EventHandler[T]) OnAdd(obj interface{}) { } // OnUpdate creates UpdateEvent and calls Update on EventHandler. -func (e *EventHandler[T]) OnUpdate(oldObj, newObj interface{}) { - u := event.TypedUpdateEvent[T]{} +func (e *EventHandler[object, request]) OnUpdate(oldObj, newObj interface{}) { + u := event.TypedUpdateEvent[object]{} - if o, ok := oldObj.(T); ok { + if o, ok := oldObj.(object); ok { u.ObjectOld = o } else { log.Error(nil, "OnUpdate missing ObjectOld", @@ -101,7 +105,7 @@ func (e *EventHandler[T]) OnUpdate(oldObj, newObj interface{}) { } // Pull Object out of the object - if o, ok := newObj.(T); ok { + if o, ok := newObj.(object); ok { u.ObjectNew = o } else { log.Error(nil, "OnUpdate missing ObjectNew", @@ -122,8 +126,8 @@ func (e *EventHandler[T]) OnUpdate(oldObj, newObj interface{}) { } // OnDelete creates DeleteEvent and calls Delete on EventHandler. -func (e *EventHandler[T]) OnDelete(obj interface{}) { - d := event.TypedDeleteEvent[T]{} +func (e *EventHandler[object, request]) OnDelete(obj interface{}) { + d := event.TypedDeleteEvent[object]{} // Deal with tombstone events by pulling the object out. Tombstone events wrap the object in a // DeleteFinalStateUnknown struct, so the object needs to be pulled out. @@ -149,7 +153,7 @@ func (e *EventHandler[T]) OnDelete(obj interface{}) { } // Pull Object out of the object - if o, ok := obj.(T); ok { + if o, ok := obj.(object); ok { d.Object = o } else { log.Error(nil, "OnDelete missing Object", diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go index 3a8db96e3c..4999edc432 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go @@ -19,16 +19,16 @@ import ( ) // Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). -type Kind[T client.Object] struct { +type Kind[object client.Object, request comparable] struct { // Type is the type of object to watch. e.g. 
&v1.Pod{} - Type T + Type object // Cache used to watch APIs Cache cache.Cache - Handler handler.TypedEventHandler[T] + Handler handler.TypedEventHandler[object, request] - Predicates []predicate.TypedPredicate[T] + Predicates []predicate.TypedPredicate[object] // startedErr may contain an error if one was encountered during startup. If its closed and does not // contain an error, startup and syncing finished. @@ -38,7 +38,7 @@ type Kind[T client.Object] struct { // Start is internal and should be called only by the Controller to register an EventHandler with the Informer // to enqueue reconcile.Requests. -func (ks *Kind[T]) Start(ctx context.Context, queue workqueue.RateLimitingInterface) error { +func (ks *Kind[object, request]) Start(ctx context.Context, queue workqueue.TypedRateLimitingInterface[request]) error { if isNil(ks.Type) { return fmt.Errorf("must create Kind with a non-nil object") } @@ -102,7 +102,7 @@ func (ks *Kind[T]) Start(ctx context.Context, queue workqueue.RateLimitingInterf return nil } -func (ks *Kind[T]) String() string { +func (ks *Kind[object, request]) String() string { if !isNil(ks.Type) { return fmt.Sprintf("kind source: %T", ks.Type) } @@ -111,7 +111,7 @@ func (ks *Kind[T]) String() string { // WaitForSync implements SyncingSource to allow controllers to wait with starting // workers until the cache is synced. -func (ks *Kind[T]) WaitForSync(ctx context.Context) error { +func (ks *Kind[object, request]) WaitForSync(ctx context.Context) error { select { case err := <-ks.startedErr: return err diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index 2ce02b105c..e5204a7506 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -187,7 +187,7 @@ func (cm *controllerManager) AddMetricsServerExtraHandler(path string, handler h return fmt.Errorf("unable to add new metrics handler because metrics endpoint has already been created") } if cm.metricsServer == nil { - cm.GetLogger().Info("warn: metrics server is currently disabled, registering extra handler %q will be ignored", path) + cm.GetLogger().Info("warn: metrics server is currently disabled, registering extra handler will be ignored", "path", path) return nil } if err := cm.metricsServer.AddExtraHandler(path, handler); err != nil { @@ -351,6 +351,16 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // Initialize the internal context. 
cm.internalCtx, cm.internalCancel = context.WithCancel(ctx) + // Leader elector must be created before defer that contains engageStopProcedure function + // https://github.com/kubernetes-sigs/controller-runtime/issues/2873 + var leaderElector *leaderelection.LeaderElector + if cm.resourceLock != nil { + leaderElector, err = cm.initLeaderElector() + if err != nil { + return fmt.Errorf("failed during initialization leader election process: %w", err) + } + } + // This chan indicates that stop is complete, in other words all runnables have returned or timeout on stop request stopComplete := make(chan struct{}) defer close(stopComplete) @@ -433,19 +443,22 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { { ctx, cancel := context.WithCancel(context.Background()) cm.leaderElectionCancel = cancel - go func() { - if cm.resourceLock != nil { - if err := cm.startLeaderElection(ctx); err != nil { - cm.errChan <- err - } - } else { + if leaderElector != nil { + // Start the leader elector process + go func() { + leaderElector.Run(ctx) + <-ctx.Done() + close(cm.leaderElectionStopped) + }() + } else { + go func() { // Treat not having leader election enabled the same as being elected. if err := cm.startLeaderElectionRunnables(); err != nil { cm.errChan <- err } close(cm.elected) - } - }() + }() + } } ready = true @@ -494,8 +507,8 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e cm.internalCancel() }) select { - case err, ok := <-cm.errChan: - if ok { + case err := <-cm.errChan: + if !errors.Is(err, context.Canceled) { cm.logger.Error(err, "error received after stop sequence was engaged") } case <-stopComplete: @@ -564,12 +577,8 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e return nil } -func (cm *controllerManager) startLeaderElectionRunnables() error { - return cm.runnables.LeaderElection.Start(cm.internalCtx) -} - -func (cm *controllerManager) startLeaderElection(ctx context.Context) (err error) { - l, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ +func (cm *controllerManager) initLeaderElector() (*leaderelection.LeaderElector, error) { + leaderElector, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ Lock: cm.resourceLock, LeaseDuration: cm.leaseDuration, RenewDeadline: cm.renewDeadline, @@ -599,16 +608,14 @@ func (cm *controllerManager) startLeaderElection(ctx context.Context) (err error Name: cm.leaderElectionID, }) if err != nil { - return err + return nil, err } - // Start the leader elector process - go func() { - l.Run(ctx) - <-ctx.Done() - close(cm.leaderElectionStopped) - }() - return nil + return leaderElector, nil +} + +func (cm *controllerManager) startLeaderElectionRunnables() error { + return cm.runnables.LeaderElection.Start(cm.internalCtx) } func (cm *controllerManager) Elected() <-chan struct{} { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go index cff1de4c1c..590653e70f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go @@ -42,27 +42,27 @@ var ( Subsystem: WorkQueueSubsystem, Name: DepthKey, Help: "Current depth of workqueue", - }, []string{"name"}) + }, []string{"name", "controller"}) adds = prometheus.NewCounterVec(prometheus.CounterOpts{ Subsystem: WorkQueueSubsystem, Name: AddsKey, Help: "Total number of adds handled by workqueue", - }, 
[]string{"name"}) + }, []string{"name", "controller"}) latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Subsystem: WorkQueueSubsystem, Name: QueueLatencyKey, Help: "How long in seconds an item stays in workqueue before being requested", Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12), - }, []string{"name"}) + }, []string{"name", "controller"}) workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Subsystem: WorkQueueSubsystem, Name: WorkDurationKey, Help: "How long in seconds processing an item from workqueue takes.", Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12), - }, []string{"name"}) + }, []string{"name", "controller"}) unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Subsystem: WorkQueueSubsystem, @@ -71,20 +71,20 @@ var ( "is in progress and hasn't been observed by work_duration. Large " + "values indicate stuck threads. One can deduce the number of stuck " + "threads by observing the rate at which this increases.", - }, []string{"name"}) + }, []string{"name", "controller"}) longestRunningProcessor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Subsystem: WorkQueueSubsystem, Name: LongestRunningProcessorKey, Help: "How many seconds has the longest running " + "processor for workqueue been running.", - }, []string{"name"}) + }, []string{"name", "controller"}) retries = prometheus.NewCounterVec(prometheus.CounterOpts{ Subsystem: WorkQueueSubsystem, Name: RetriesKey, Help: "Total number of retries handled by workqueue", - }, []string{"name"}) + }, []string{"name", "controller"}) ) func init() { @@ -102,29 +102,29 @@ func init() { type workqueueMetricsProvider struct{} func (workqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { - return depth.WithLabelValues(name) + return depth.WithLabelValues(name, name) } func (workqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { - return adds.WithLabelValues(name) + return adds.WithLabelValues(name, name) } func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { - return latency.WithLabelValues(name) + return latency.WithLabelValues(name, name) } func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { - return workDuration.WithLabelValues(name) + return workDuration.WithLabelValues(name, name) } func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { - return unfinished.WithLabelValues(name) + return unfinished.WithLabelValues(name, name) } func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { - return longestRunningProcessor.WithLabelValues(name) + return longestRunningProcessor.WithLabelValues(name, name) } func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { - return retries.WithLabelValues(name) + return retries.WithLabelValues(name, name) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go index f74889d1cc..90918db57a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -33,18 +33,18 @@ var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters") type Predicate = TypedPredicate[client.Object] // TypedPredicate filters events before enqueuing the keys. 
-type TypedPredicate[T any] interface { +type TypedPredicate[object any] interface { // Create returns true if the Create event should be processed - Create(event.TypedCreateEvent[T]) bool + Create(event.TypedCreateEvent[object]) bool // Delete returns true if the Delete event should be processed - Delete(event.TypedDeleteEvent[T]) bool + Delete(event.TypedDeleteEvent[object]) bool // Update returns true if the Update event should be processed - Update(event.TypedUpdateEvent[T]) bool + Update(event.TypedUpdateEvent[object]) bool // Generic returns true if the Generic event should be processed - Generic(event.TypedGenericEvent[T]) bool + Generic(event.TypedGenericEvent[object]) bool } var _ Predicate = Funcs{} @@ -59,22 +59,22 @@ var _ Predicate = not[client.Object]{} type Funcs = TypedFuncs[client.Object] // TypedFuncs is a function that implements TypedPredicate. -type TypedFuncs[T any] struct { +type TypedFuncs[object any] struct { // Create returns true if the Create event should be processed - CreateFunc func(event.TypedCreateEvent[T]) bool + CreateFunc func(event.TypedCreateEvent[object]) bool // Delete returns true if the Delete event should be processed - DeleteFunc func(event.TypedDeleteEvent[T]) bool + DeleteFunc func(event.TypedDeleteEvent[object]) bool // Update returns true if the Update event should be processed - UpdateFunc func(event.TypedUpdateEvent[T]) bool + UpdateFunc func(event.TypedUpdateEvent[object]) bool // Generic returns true if the Generic event should be processed - GenericFunc func(event.TypedGenericEvent[T]) bool + GenericFunc func(event.TypedGenericEvent[object]) bool } // Create implements Predicate. -func (p TypedFuncs[T]) Create(e event.TypedCreateEvent[T]) bool { +func (p TypedFuncs[object]) Create(e event.TypedCreateEvent[object]) bool { if p.CreateFunc != nil { return p.CreateFunc(e) } @@ -82,7 +82,7 @@ func (p TypedFuncs[T]) Create(e event.TypedCreateEvent[T]) bool { } // Delete implements Predicate. -func (p TypedFuncs[T]) Delete(e event.TypedDeleteEvent[T]) bool { +func (p TypedFuncs[object]) Delete(e event.TypedDeleteEvent[object]) bool { if p.DeleteFunc != nil { return p.DeleteFunc(e) } @@ -90,7 +90,7 @@ func (p TypedFuncs[T]) Delete(e event.TypedDeleteEvent[T]) bool { } // Update implements Predicate. -func (p TypedFuncs[T]) Update(e event.TypedUpdateEvent[T]) bool { +func (p TypedFuncs[object]) Update(e event.TypedUpdateEvent[object]) bool { if p.UpdateFunc != nil { return p.UpdateFunc(e) } @@ -98,7 +98,7 @@ func (p TypedFuncs[T]) Update(e event.TypedUpdateEvent[T]) bool { } // Generic implements Predicate. -func (p TypedFuncs[T]) Generic(e event.TypedGenericEvent[T]) bool { +func (p TypedFuncs[object]) Generic(e event.TypedGenericEvent[object]) bool { if p.GenericFunc != nil { return p.GenericFunc(e) } @@ -128,35 +128,38 @@ func NewPredicateFuncs(filter func(object client.Object) bool) Funcs { // NewTypedPredicateFuncs returns a predicate funcs that applies the given filter function // on CREATE, UPDATE, DELETE and GENERIC events. For UPDATE events, the filter is applied // to the new object. 
-func NewTypedPredicateFuncs[T any](filter func(object T) bool) TypedFuncs[T] { - return TypedFuncs[T]{ - CreateFunc: func(e event.TypedCreateEvent[T]) bool { +func NewTypedPredicateFuncs[object any](filter func(object object) bool) TypedFuncs[object] { + return TypedFuncs[object]{ + CreateFunc: func(e event.TypedCreateEvent[object]) bool { return filter(e.Object) }, - UpdateFunc: func(e event.TypedUpdateEvent[T]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[object]) bool { return filter(e.ObjectNew) }, - DeleteFunc: func(e event.TypedDeleteEvent[T]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[object]) bool { return filter(e.Object) }, - GenericFunc: func(e event.TypedGenericEvent[T]) bool { + GenericFunc: func(e event.TypedGenericEvent[object]) bool { return filter(e.Object) }, } } // ResourceVersionChangedPredicate implements a default update predicate function on resource version change. -type ResourceVersionChangedPredicate struct { - Funcs +type ResourceVersionChangedPredicate = TypedResourceVersionChangedPredicate[client.Object] + +// TypedResourceVersionChangedPredicate implements a default update predicate function on resource version change. +type TypedResourceVersionChangedPredicate[T metav1.Object] struct { + TypedFuncs[T] } // Update implements default UpdateEvent filter for validating resource version change. -func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { - if e.ObjectOld == nil { +func (TypedResourceVersionChangedPredicate[T]) Update(e event.TypedUpdateEvent[T]) bool { + if isNil(e.ObjectOld) { log.Error(nil, "Update event has no old object to update", "event", e) return false } - if e.ObjectNew == nil { + if isNil(e.ObjectNew) { log.Error(nil, "Update event has no new object to update", "event", e) return false } @@ -198,12 +201,12 @@ type GenerationChangedPredicate = TypedGenerationChangedPredicate[client.Object] // // * With this predicate, any update events with writes only to the status field will not be reconciled. // So in the event that the status block is overwritten or wiped by someone else the controller will not self-correct to restore the correct status. -type TypedGenerationChangedPredicate[T metav1.Object] struct { - TypedFuncs[T] +type TypedGenerationChangedPredicate[object metav1.Object] struct { + TypedFuncs[object] } // Update implements default UpdateEvent filter for validating generation change. -func (TypedGenerationChangedPredicate[T]) Update(e event.TypedUpdateEvent[T]) bool { +func (TypedGenerationChangedPredicate[object]) Update(e event.TypedUpdateEvent[object]) bool { if isNil(e.ObjectOld) { log.Error(nil, "Update event has no old object to update", "event", e) return false @@ -231,12 +234,12 @@ func (TypedGenerationChangedPredicate[T]) Update(e event.TypedUpdateEvent[T]) bo type AnnotationChangedPredicate = TypedAnnotationChangedPredicate[client.Object] // TypedAnnotationChangedPredicate implements a default update predicate function on annotation change. -type TypedAnnotationChangedPredicate[T metav1.Object] struct { - TypedFuncs[T] +type TypedAnnotationChangedPredicate[object metav1.Object] struct { + TypedFuncs[object] } // Update implements default UpdateEvent filter for validating annotation change. 
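With the typed predicate variants the filter receives the concrete object type rather than client.Object. A sketch combining a label filter with generation-change filtering for Pods, assuming an existing manager mgr and controller c; the label key is made up:

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

func watchManagedPods(mgr ctrl.Manager, c controller.Controller) error {
	onlyManaged := predicate.NewTypedPredicateFuncs(func(pod *corev1.Pod) bool {
		return pod.Labels["example.com/managed"] == "true" // made-up label key
	})
	return c.Watch(source.Kind(
		mgr.GetCache(),
		&corev1.Pod{},
		&handler.TypedEnqueueRequestForObject[*corev1.Pod]{},
		onlyManaged,
		predicate.TypedGenerationChangedPredicate[*corev1.Pod]{},
	))
}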
-func (TypedAnnotationChangedPredicate[T]) Update(e event.TypedUpdateEvent[T]) bool { +func (TypedAnnotationChangedPredicate[object]) Update(e event.TypedUpdateEvent[object]) bool { if isNil(e.ObjectOld) { log.Error(nil, "Update event has no old object to update", "event", e) return false @@ -265,12 +268,12 @@ func (TypedAnnotationChangedPredicate[T]) Update(e event.TypedUpdateEvent[T]) bo type LabelChangedPredicate = TypedLabelChangedPredicate[client.Object] // TypedLabelChangedPredicate implements a default update predicate function on label change. -type TypedLabelChangedPredicate[T metav1.Object] struct { - TypedFuncs[T] +type TypedLabelChangedPredicate[object metav1.Object] struct { + TypedFuncs[object] } // Update implements default UpdateEvent filter for checking label change. -func (TypedLabelChangedPredicate[T]) Update(e event.TypedUpdateEvent[T]) bool { +func (TypedLabelChangedPredicate[object]) Update(e event.TypedUpdateEvent[object]) bool { if isNil(e.ObjectOld) { log.Error(nil, "Update event has no old object to update", "event", e) return false @@ -284,15 +287,15 @@ func (TypedLabelChangedPredicate[T]) Update(e event.TypedUpdateEvent[T]) bool { } // And returns a composite predicate that implements a logical AND of the predicates passed to it. -func And[T any](predicates ...TypedPredicate[T]) TypedPredicate[T] { - return and[T]{predicates} +func And[object any](predicates ...TypedPredicate[object]) TypedPredicate[object] { + return and[object]{predicates} } -type and[T any] struct { - predicates []TypedPredicate[T] +type and[object any] struct { + predicates []TypedPredicate[object] } -func (a and[T]) Create(e event.TypedCreateEvent[T]) bool { +func (a and[object]) Create(e event.TypedCreateEvent[object]) bool { for _, p := range a.predicates { if !p.Create(e) { return false @@ -301,7 +304,7 @@ func (a and[T]) Create(e event.TypedCreateEvent[T]) bool { return true } -func (a and[T]) Update(e event.TypedUpdateEvent[T]) bool { +func (a and[object]) Update(e event.TypedUpdateEvent[object]) bool { for _, p := range a.predicates { if !p.Update(e) { return false @@ -310,7 +313,7 @@ func (a and[T]) Update(e event.TypedUpdateEvent[T]) bool { return true } -func (a and[T]) Delete(e event.TypedDeleteEvent[T]) bool { +func (a and[object]) Delete(e event.TypedDeleteEvent[object]) bool { for _, p := range a.predicates { if !p.Delete(e) { return false @@ -319,7 +322,7 @@ func (a and[T]) Delete(e event.TypedDeleteEvent[T]) bool { return true } -func (a and[T]) Generic(e event.TypedGenericEvent[T]) bool { +func (a and[object]) Generic(e event.TypedGenericEvent[object]) bool { for _, p := range a.predicates { if !p.Generic(e) { return false @@ -329,15 +332,15 @@ func (a and[T]) Generic(e event.TypedGenericEvent[T]) bool { } // Or returns a composite predicate that implements a logical OR of the predicates passed to it. 
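And, Or and Not are generic over the same object parameter, so typed predicates compose as before; explicit instantiation is needed when an operand is a concrete TypedFuncs value. A short sketch using the same imports as the previous predicate example:

func podFilter() predicate.TypedPredicate[*corev1.Pod] {
	notKubeSystem := predicate.Not[*corev1.Pod](predicate.NewTypedPredicateFuncs(
		func(pod *corev1.Pod) bool { return pod.Namespace == "kube-system" },
	))
	return predicate.And(notKubeSystem, predicate.TypedLabelChangedPredicate[*corev1.Pod]{})
}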
-func Or[T any](predicates ...TypedPredicate[T]) TypedPredicate[T] { - return or[T]{predicates} +func Or[object any](predicates ...TypedPredicate[object]) TypedPredicate[object] { + return or[object]{predicates} } -type or[T any] struct { - predicates []TypedPredicate[T] +type or[object any] struct { + predicates []TypedPredicate[object] } -func (o or[T]) Create(e event.TypedCreateEvent[T]) bool { +func (o or[object]) Create(e event.TypedCreateEvent[object]) bool { for _, p := range o.predicates { if p.Create(e) { return true @@ -346,7 +349,7 @@ func (o or[T]) Create(e event.TypedCreateEvent[T]) bool { return false } -func (o or[T]) Update(e event.TypedUpdateEvent[T]) bool { +func (o or[object]) Update(e event.TypedUpdateEvent[object]) bool { for _, p := range o.predicates { if p.Update(e) { return true @@ -355,7 +358,7 @@ func (o or[T]) Update(e event.TypedUpdateEvent[T]) bool { return false } -func (o or[T]) Delete(e event.TypedDeleteEvent[T]) bool { +func (o or[object]) Delete(e event.TypedDeleteEvent[object]) bool { for _, p := range o.predicates { if p.Delete(e) { return true @@ -364,7 +367,7 @@ func (o or[T]) Delete(e event.TypedDeleteEvent[T]) bool { return false } -func (o or[T]) Generic(e event.TypedGenericEvent[T]) bool { +func (o or[object]) Generic(e event.TypedGenericEvent[object]) bool { for _, p := range o.predicates { if p.Generic(e) { return true @@ -374,27 +377,27 @@ func (o or[T]) Generic(e event.TypedGenericEvent[T]) bool { } // Not returns a predicate that implements a logical NOT of the predicate passed to it. -func Not[T any](predicate TypedPredicate[T]) TypedPredicate[T] { - return not[T]{predicate} +func Not[object any](predicate TypedPredicate[object]) TypedPredicate[object] { + return not[object]{predicate} } -type not[T any] struct { - predicate TypedPredicate[T] +type not[object any] struct { + predicate TypedPredicate[object] } -func (n not[T]) Create(e event.TypedCreateEvent[T]) bool { +func (n not[object]) Create(e event.TypedCreateEvent[object]) bool { return !n.predicate.Create(e) } -func (n not[T]) Update(e event.TypedUpdateEvent[T]) bool { +func (n not[object]) Update(e event.TypedUpdateEvent[object]) bool { return !n.predicate.Update(e) } -func (n not[T]) Delete(e event.TypedDeleteEvent[T]) bool { +func (n not[object]) Delete(e event.TypedDeleteEvent[object]) bool { return !n.predicate.Delete(e) } -func (n not[T]) Generic(e event.TypedGenericEvent[T]) bool { +func (n not[object]) Generic(e event.TypedGenericEvent[object]) bool { return !n.predicate.Generic(e) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go index f1cce87c85..ee63f681cc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -89,7 +89,14 @@ driven by actual cluster state read from the apiserver or a local cache. For example if responding to a Pod Delete Event, the Request won't contain that a Pod was deleted, instead the reconcile function observes this when reading the cluster state and seeing the Pod as missing. */ -type Reconciler interface { +type Reconciler = TypedReconciler[Request] + +// TypedReconciler implements an API for a specific Resource by Creating, Updating or Deleting Kubernetes +// objects, or by making changes to systems external to the cluster (e.g. cloudproviders, github, etc). +// +// The request type is what event handlers put into the workqueue. 
The workqueue then de-duplicates identical +// requests. +type TypedReconciler[request comparable] interface { // Reconcile performs a full reconciliation for the object referred to by the Request. // // If the returned error is non-nil, the Result is ignored and the request will be @@ -101,40 +108,45 @@ type Reconciler interface { // // If the error is nil and result.RequeueAfter is zero and result.Requeue is true, the request // will be requeued using exponential backoff. - Reconcile(context.Context, Request) (Result, error) + Reconcile(context.Context, request) (Result, error) } // Func is a function that implements the reconcile interface. -type Func func(context.Context, Request) (Result, error) +type Func = TypedFunc[Request] + +// TypedFunc is a function that implements the reconcile interface. +type TypedFunc[request comparable] func(context.Context, request) (Result, error) var _ Reconciler = Func(nil) // Reconcile implements Reconciler. -func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) } +func (r TypedFunc[request]) Reconcile(ctx context.Context, req request) (Result, error) { + return r(ctx, req) +} // ObjectReconciler is a specialized version of Reconciler that acts on instances of client.Object. Each reconciliation // event gets the associated object from Kubernetes before passing it to Reconcile. An ObjectReconciler can be used in // Builder.Complete by calling AsReconciler. See Reconciler for more details. -type ObjectReconciler[T client.Object] interface { - Reconcile(context.Context, T) (Result, error) +type ObjectReconciler[object client.Object] interface { + Reconcile(context.Context, object) (Result, error) } // AsReconciler creates a Reconciler based on the given ObjectReconciler. -func AsReconciler[T client.Object](client client.Client, rec ObjectReconciler[T]) Reconciler { - return &objectReconcilerAdapter[T]{ +func AsReconciler[object client.Object](client client.Client, rec ObjectReconciler[object]) Reconciler { + return &objectReconcilerAdapter[object]{ objReconciler: rec, client: client, } } -type objectReconcilerAdapter[T client.Object] struct { - objReconciler ObjectReconciler[T] +type objectReconcilerAdapter[object client.Object] struct { + objReconciler ObjectReconciler[object] client client.Client } // Reconcile implements Reconciler. -func (a *objectReconcilerAdapter[T]) Reconcile(ctx context.Context, req Request) (Result, error) { - o := reflect.New(reflect.TypeOf(*new(T)).Elem()).Interface().(T) +func (a *objectReconcilerAdapter[object]) Reconcile(ctx context.Context, req Request) (Result, error) { + o := reflect.New(reflect.TypeOf(*new(object)).Elem()).Interface().(object) if err := a.client.Get(ctx, req.NamespacedName, o); err != nil { return Result{}, client.IgnoreNotFound(err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go index 26e53022bf..267a6470b8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" internal "sigs.k8s.io/controller-runtime/pkg/internal/source" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -41,45 +42,74 @@ import ( // * Use Channel for events originating outside the cluster (e.g. 
GitHub Webhook callback, Polling external urls). // // Users may build their own Source implementations. -type Source interface { - // Start is internal and should be called only by the Controller to register an EventHandler with the Informer - // to enqueue reconcile.Requests. - Start(context.Context, workqueue.RateLimitingInterface) error +type Source = TypedSource[reconcile.Request] + +// TypedSource is a generic source of events (e.g. Create, Update, Delete operations on Kubernetes Objects, Webhook callbacks, etc) +// which should be processed by event.EventHandlers to enqueue a request. +// +// * Use Kind for events originating in the cluster (e.g. Pod Create, Pod Update, Deployment Update). +// +// * Use Channel for events originating outside the cluster (e.g. GitHub Webhook callback, Polling external urls). +// +// Users may build their own Source implementations. +type TypedSource[request comparable] interface { + // Start is internal and should be called only by the Controller to start the source. + // Start must be non-blocking. + Start(context.Context, workqueue.TypedRateLimitingInterface[request]) error } // SyncingSource is a source that needs syncing prior to being usable. The controller // will call its WaitForSync prior to starting workers. -type SyncingSource interface { - Source +type SyncingSource = TypedSyncingSource[reconcile.Request] + +// TypedSyncingSource is a source that needs syncing prior to being usable. The controller +// will call its WaitForSync prior to starting workers. +type TypedSyncingSource[request comparable] interface { + TypedSource[request] WaitForSync(ctx context.Context) error } // Kind creates a KindSource with the given cache provider. -func Kind[T client.Object](cache cache.Cache, object T, handler handler.TypedEventHandler[T], predicates ...predicate.TypedPredicate[T]) SyncingSource { - return &internal.Kind[T]{ - Type: object, +func Kind[object client.Object]( + cache cache.Cache, + obj object, + handler handler.TypedEventHandler[object, reconcile.Request], + predicates ...predicate.TypedPredicate[object], +) SyncingSource { + return TypedKind(cache, obj, handler, predicates...) +} + +// TypedKind creates a KindSource with the given cache provider. +func TypedKind[object client.Object, request comparable]( + cache cache.Cache, + obj object, + handler handler.TypedEventHandler[object, request], + predicates ...predicate.TypedPredicate[object], +) TypedSyncingSource[request] { + return &internal.Kind[object, request]{ + Type: obj, Cache: cache, Handler: handler, Predicates: predicates, } } -var _ Source = &channel[string]{} +var _ Source = &channel[string, reconcile.Request]{} // ChannelOpt allows to configure a source.Channel. -type ChannelOpt[T any] func(*channel[T]) +type ChannelOpt[object any, request comparable] func(*channel[object, request]) // WithPredicates adds the configured predicates to a source.Channel. -func WithPredicates[T any](p ...predicate.TypedPredicate[T]) ChannelOpt[T] { - return func(c *channel[T]) { +func WithPredicates[object any, request comparable](p ...predicate.TypedPredicate[object]) ChannelOpt[object, request] { + return func(c *channel[object, request]) { c.predicates = append(c.predicates, p...) } } // WithBufferSize configures the buffer size for a source.Channel. By // default, the buffer size is 1024. 
-func WithBufferSize[T any](bufferSize int) ChannelOpt[T] { - return func(c *channel[T]) { +func WithBufferSize[object any, request comparable](bufferSize int) ChannelOpt[object, request] { + return func(c *channel[object, request]) { c.bufferSize = &bufferSize } } @@ -87,8 +117,23 @@ func WithBufferSize[T any](bufferSize int) ChannelOpt[T] { // Channel is used to provide a source of events originating outside the cluster // (e.g. GitHub Webhook callback). Channel requires the user to wire the external // source (e.g. http handler) to write GenericEvents to the underlying channel. -func Channel[T any](source <-chan event.TypedGenericEvent[T], handler handler.TypedEventHandler[T], opts ...ChannelOpt[T]) Source { - c := &channel[T]{ +func Channel[object any]( + source <-chan event.TypedGenericEvent[object], + handler handler.TypedEventHandler[object, reconcile.Request], + opts ...ChannelOpt[object, reconcile.Request], +) Source { + return TypedChannel[object, reconcile.Request](source, handler, opts...) +} + +// TypedChannel is used to provide a source of events originating outside the cluster +// (e.g. GitHub Webhook callback). Channel requires the user to wire the external +// source (e.g. http handler) to write GenericEvents to the underlying channel. +func TypedChannel[object any, request comparable]( + source <-chan event.TypedGenericEvent[object], + handler handler.TypedEventHandler[object, request], + opts ...ChannelOpt[object, request], +) TypedSource[request] { + c := &channel[object, request]{ source: source, handler: handler, } @@ -99,34 +144,34 @@ func Channel[T any](source <-chan event.TypedGenericEvent[T], handler handler.Ty return c } -type channel[T any] struct { +type channel[object any, request comparable] struct { // once ensures the event distribution goroutine will be performed only once once sync.Once // source is the source channel to fetch GenericEvents - source <-chan event.TypedGenericEvent[T] + source <-chan event.TypedGenericEvent[object] - handler handler.TypedEventHandler[T] + handler handler.TypedEventHandler[object, request] - predicates []predicate.TypedPredicate[T] + predicates []predicate.TypedPredicate[object] bufferSize *int // dest is the destination channels of the added event handlers - dest []chan event.TypedGenericEvent[T] + dest []chan event.TypedGenericEvent[object] // destLock is to ensure the destination channels are safely added/removed destLock sync.Mutex } -func (cs *channel[T]) String() string { +func (cs *channel[object, request]) String() string { return fmt.Sprintf("channel source: %p", cs) } // Start implements Source and should only be called by the Controller. -func (cs *channel[T]) Start( +func (cs *channel[object, request]) Start( ctx context.Context, - queue workqueue.RateLimitingInterface, + queue workqueue.TypedRateLimitingInterface[request], ) error { // Source should have been specified by the user. 
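(Editor's aside, not part of the vendored diff.) The reconcile.go and source.go hunks above turn Reconciler, Func and Source into aliases over generic Typed* forms parameterized on reconcile.Request, so downstream code written against the untyped interfaces should keep compiling. A minimal compile-check sketch, assuming only the package path shown in this diff:

package main

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// Reconciler is now an alias for TypedReconciler[reconcile.Request], so a
// plain reconcile.Func still satisfies it without changes.
var _ reconcile.Reconciler = reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
	return reconcile.Result{}, nil
})

func main() {}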
if cs.source == nil { @@ -140,7 +185,7 @@ func (cs *channel[T]) Start( cs.bufferSize = ptr.To(1024) } - dst := make(chan event.TypedGenericEvent[T], *cs.bufferSize) + dst := make(chan event.TypedGenericEvent[object], *cs.bufferSize) cs.destLock.Lock() cs.dest = append(cs.dest, dst) @@ -174,7 +219,7 @@ func (cs *channel[T]) Start( return nil } -func (cs *channel[T]) doStop() { +func (cs *channel[object, request]) doStop() { cs.destLock.Lock() defer cs.destLock.Unlock() @@ -183,7 +228,7 @@ func (cs *channel[T]) doStop() { } } -func (cs *channel[T]) distribute(evt event.TypedGenericEvent[T]) { +func (cs *channel[object, request]) distribute(evt event.TypedGenericEvent[object]) { cs.destLock.Lock() defer cs.destLock.Unlock() @@ -197,7 +242,7 @@ func (cs *channel[T]) distribute(evt event.TypedGenericEvent[T]) { } } -func (cs *channel[T]) syncLoop(ctx context.Context) { +func (cs *channel[object, request]) syncLoop(ctx context.Context) { for { select { case <-ctx.Done(): @@ -228,7 +273,7 @@ var _ Source = &Informer{} // Start is internal and should be called only by the Controller to register an EventHandler with the Informer // to enqueue reconcile.Requests. -func (is *Informer) Start(ctx context.Context, queue workqueue.RateLimitingInterface) error { +func (is *Informer) Start(ctx context.Context, queue workqueue.TypedRateLimitingInterface[reconcile.Request]) error { // Informer should have been specified by the user. if is.Informer == nil { return fmt.Errorf("must specify Informer.Informer") @@ -251,13 +296,16 @@ func (is *Informer) String() string { var _ Source = Func(nil) // Func is a function that implements Source. -type Func func(context.Context, workqueue.RateLimitingInterface) error +type Func = TypedFunc[reconcile.Request] + +// TypedFunc is a function that implements Source. +type TypedFunc[request comparable] func(context.Context, workqueue.TypedRateLimitingInterface[request]) error // Start implements Source. -func (f Func) Start(ctx context.Context, queue workqueue.RateLimitingInterface) error { +func (f TypedFunc[request]) Start(ctx context.Context, queue workqueue.TypedRateLimitingInterface[request]) error { return f(ctx, queue) } -func (f Func) String() string { +func (f TypedFunc[request]) String() string { return fmt.Sprintf("func source: %p", f) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/metrics/metrics.go new file mode 100644 index 0000000000..358a3a9162 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/metrics/metrics.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // WebhookPanics is a prometheus counter metrics which holds the total + // number of panics from webhooks. 
+ WebhookPanics = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_webhook_panics_total", + Help: "Total number of webhook panics", + }, []string{}) +) + +func init() { + metrics.Registry.MustRegister( + WebhookPanics, + ) + // Init metric. + WebhookPanics.WithLabelValues().Add(0) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go index f1767f31b2..cba6da2cb0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/klog/v2" + admissionmetrics "sigs.k8s.io/controller-runtime/pkg/webhook/admission/metrics" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" @@ -123,7 +124,8 @@ type Webhook struct { Handler Handler // RecoverPanic indicates whether the panic caused by webhook should be recovered. - RecoverPanic bool + // Defaults to true. + RecoverPanic *bool // WithContextFunc will allow you to take the http.Request.Context() and // add any additional information such as passing the request path or @@ -141,8 +143,9 @@ type Webhook struct { } // WithRecoverPanic takes a bool flag which indicates whether the panic caused by webhook should be recovered. +// Defaults to true. func (wh *Webhook) WithRecoverPanic(recoverPanic bool) *Webhook { - wh.RecoverPanic = recoverPanic + wh.RecoverPanic = &recoverPanic return wh } @@ -151,17 +154,26 @@ func (wh *Webhook) WithRecoverPanic(recoverPanic bool) *Webhook { // If the webhook is validating type, it delegates the AdmissionRequest to each handler and // deny the request if anyone denies. func (wh *Webhook) Handle(ctx context.Context, req Request) (response Response) { - if wh.RecoverPanic { - defer func() { - if r := recover(); r != nil { + defer func() { + if r := recover(); r != nil { + admissionmetrics.WebhookPanics.WithLabelValues().Inc() + + if wh.RecoverPanic == nil || *wh.RecoverPanic { for _, fn := range utilruntime.PanicHandlers { - fn(r) + fn(ctx, r) } response = Errored(http.StatusInternalServerError, fmt.Errorf("panic: %v [recovered]", r)) + // Note: We explicitly have to set the response UID. Usually that is done via resp.Complete below, + // but if we encounter a panic in wh.Handler.Handle we are never going to reach resp.Complete. + response.UID = req.UID return } - }() - } + + log := logf.FromContext(ctx) + log.Info(fmt.Sprintf("Observed a panic in webhook: %v", r)) + panic(r) + } + }() reqLog := wh.getLogger(&req) ctx = logf.IntoContext(ctx, reqLog) @@ -169,7 +181,10 @@ func (wh *Webhook) Handle(ctx context.Context, req Request) (response Response) resp := wh.Handler.Handle(ctx, req) if err := resp.Complete(req); err != nil { reqLog.Error(err, "unable to encode response") - return Errored(http.StatusInternalServerError, errUnableToEncodeResponse) + resp := Errored(http.StatusInternalServerError, errUnableToEncodeResponse) + // Note: We explicitly have to set the response UID. Usually that is done via resp.Complete. 
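(Editor's aside, not part of the vendored diff.) The webhook.go hunk above changes RecoverPanic from a bool to a *bool that is treated as true when unset, so panics in handlers are now recovered by default and counted by the new controller_runtime_webhook_panics_total metric. A short sketch of opting back out, using only the admission package path shown above:

package main

import (
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// Passing false keeps the panic metric and the log line added above, but lets
// the panic propagate instead of converting it into a 500 response.
func newStrictWebhook(h admission.Handler) *admission.Webhook {
	return (&admission.Webhook{Handler: h}).WithRecoverPanic(false)
}

func main() {}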
+ resp.UID = req.UID + return resp } return resp diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go b/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go index d2a728498a..58297d4415 100644 --- a/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go +++ b/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go @@ -6,7 +6,7 @@ package imagetag import ( "sigs.k8s.io/kustomize/api/filters/filtersutil" - "sigs.k8s.io/kustomize/api/image" + "sigs.k8s.io/kustomize/api/internal/image" "sigs.k8s.io/kustomize/api/types" "sigs.k8s.io/kustomize/kyaml/yaml" ) diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go b/vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go index 4815f10a29..ff83420cb1 100644 --- a/vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go +++ b/vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go @@ -284,9 +284,9 @@ func (f Filter) roleRefFilter() sieveFunc { return previousIdSelectedByGvk(roleRefGvk) } -func prefixSuffixEquals(other resource.ResCtx) sieveFunc { +func prefixSuffixEquals(other resource.ResCtx, allowEmpty bool) sieveFunc { return func(r *resource.Resource) bool { - return r.PrefixesSuffixesEquals(other) + return r.PrefixesSuffixesEquals(other, allowEmpty) } } @@ -325,7 +325,10 @@ func (f Filter) selectReferral( if len(candidates) == 1 { return candidates[0], nil } - candidates = doSieve(candidates, prefixSuffixEquals(f.Referrer)) + candidates = doSieve(candidates, prefixSuffixEquals(f.Referrer, true)) + if len(candidates) > 1 { + candidates = doSieve(candidates, prefixSuffixEquals(f.Referrer, false)) + } if len(candidates) == 1 { return candidates[0], nil } diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go b/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go index 5749d6ddfd..671567d9a1 100644 --- a/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go +++ b/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go @@ -6,7 +6,7 @@ package patchjson6902 import ( "strings" - jsonpatch "github.com/evanphx/json-patch" + jsonpatch "gopkg.in/evanphx/json-patch.v4" "sigs.k8s.io/kustomize/kyaml/kio" "sigs.k8s.io/kustomize/kyaml/yaml" k8syaml "sigs.k8s.io/yaml" diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go b/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go index d4fea260be..bea5690c40 100644 --- a/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go +++ b/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go @@ -126,8 +126,8 @@ func applyReplacement(nodes []*yaml.RNode, value *yaml.RNode, targetSelectors [] } // filter targets by matching resource IDs - for i, id := range ids { - if id.IsSelectedBy(selector.Select.ResId) && !rejectId(selector.Reject, &ids[i]) { + for _, id := range ids { + if id.IsSelectedBy(selector.Select.ResId) && !containsRejectId(selector.Reject, ids) { err := copyValueToTarget(possibleTarget, value, selector) if err != nil { return nil, err @@ -168,10 +168,15 @@ func matchesAnnoAndLabelSelector(n *yaml.RNode, selector *types.Selector) (bool, return annoMatch && labelMatch, nil } -func rejectId(rejects []*types.Selector, id *resid.ResId) bool { +func containsRejectId(rejects []*types.Selector, ids []resid.ResId) bool { for _, r := range rejects { - if !r.ResId.IsEmpty() && id.IsSelectedBy(r.ResId) { - return true + if r.ResId.IsEmpty() { + continue + } + for _, id := range ids { + if 
id.IsSelectedBy(r.ResId) { + return true + } } } return false diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go index afe6da2e74..f60c517b76 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go @@ -144,7 +144,7 @@ func loadCrdIntoConfig( } _, label := property.Extensions.GetString(xLabelSelector) if label { - err = theConfig.AddLabelFieldSpec( + err = theConfig.AddCommonLabelsFieldSpec( makeFs(theGvk, append(path, propName))) if err != nil { return diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go index 113f56ea7c..c93c18c009 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go @@ -12,11 +12,12 @@ import ( "regexp" "strings" - "github.com/imdario/mergo" "sigs.k8s.io/kustomize/api/resmap" "sigs.k8s.io/kustomize/api/types" "sigs.k8s.io/kustomize/kyaml/errors" "sigs.k8s.io/kustomize/kyaml/kio" + kyaml "sigs.k8s.io/kustomize/kyaml/yaml" + "sigs.k8s.io/kustomize/kyaml/yaml/merge2" "sigs.k8s.io/yaml" ) @@ -53,6 +54,15 @@ func (p *HelmChartInflationGeneratorPlugin) Config( if h.GeneralConfig().HelmConfig.Command == "" { return fmt.Errorf("must specify --helm-command") } + + // CLI args takes precedence + if h.GeneralConfig().HelmConfig.KubeVersion != "" { + p.HelmChart.KubeVersion = h.GeneralConfig().HelmConfig.KubeVersion + } + if len(h.GeneralConfig().HelmConfig.ApiVersions) != 0 { + p.HelmChart.ApiVersions = h.GeneralConfig().HelmConfig.ApiVersions + } + p.h = h if err = yaml.Unmarshal(config, p); err != nil { return @@ -91,7 +101,7 @@ func (p *HelmChartInflationGeneratorPlugin) validateArgs() (err error) { // be under the loader root (unless root restrictions are // disabled). 
if p.ValuesFile == "" { - p.ValuesFile = filepath.Join(p.ChartHome, p.Name, "values.yaml") + p.ValuesFile = filepath.Join(p.absChartHome(), p.Name, "values.yaml") } for i, file := range p.AdditionalValuesFiles { // use Load() to enforce root restrictions @@ -132,10 +142,17 @@ func (p *HelmChartInflationGeneratorPlugin) errIfIllegalValuesMerge() error { } func (p *HelmChartInflationGeneratorPlugin) absChartHome() string { + var chartHome string if filepath.IsAbs(p.ChartHome) { - return p.ChartHome + chartHome = p.ChartHome + } else { + chartHome = filepath.Join(p.h.Loader().Root(), p.ChartHome) } - return filepath.Join(p.h.Loader().Root(), p.ChartHome) + + if p.Version != "" && p.Repo != "" { + return filepath.Join(chartHome, fmt.Sprintf("%s-%s", p.Name, p.Version)) + } + return chartHome } func (p *HelmChartInflationGeneratorPlugin) runHelmCommand( @@ -185,18 +202,33 @@ func (p *HelmChartInflationGeneratorPlugin) replaceValuesInline() error { if err != nil { return err } - chValues := make(map[string]interface{}) - if err = yaml.Unmarshal(pValues, &chValues); err != nil { - return err + chValues, err := kyaml.Parse(string(pValues)) + if err != nil { + return errors.WrapPrefixf(err, "could not parse values file into rnode") } + inlineValues, err := kyaml.FromMap(p.ValuesInline) + if err != nil { + return errors.WrapPrefixf(err, "could not parse values inline into rnode") + } + var outValues *kyaml.RNode switch p.ValuesMerge { + // Function `merge2.Merge` overrides values in dest with values from src. + // To achieve override or merge behavior, we pass parameters in different order. + // Object passed as dest will be modified, so we copy it just in case someone + // decides to use it after this is called. case valuesMergeOptionOverride: - err = mergo.Merge( - &chValues, p.ValuesInline, mergo.WithOverride) + outValues, err = merge2.Merge(inlineValues, chValues.Copy(), kyaml.MergeOptions{}) case valuesMergeOptionMerge: - err = mergo.Merge(&chValues, p.ValuesInline) + outValues, err = merge2.Merge(chValues, inlineValues.Copy(), kyaml.MergeOptions{}) + } + if err != nil { + return errors.WrapPrefixf(err, "could not merge values") + } + mapValues, err := outValues.Map() + if err != nil { + return errors.WrapPrefixf(err, "could not parse merged values into map") } - p.ValuesInline = chValues + p.ValuesInline = mapValues return err } @@ -281,8 +313,18 @@ func (p *HelmChartInflationGeneratorPlugin) pullCommand() []string { "pull", "--untar", "--untardir", p.absChartHome(), - "--repo", p.Repo, - p.Name} + } + + switch { + case strings.HasPrefix(p.Repo, "oci://"): + args = append(args, strings.TrimSuffix(p.Repo, "/")+"/"+p.Name) + case p.Repo != "": + args = append(args, "--repo", p.Repo) + fallthrough + default: + args = append(args, p.Name) + } + if p.Version != "" { args = append(args, "--version", p.Version) } diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go index 5ee5353f4a..04625e5109 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go @@ -6,7 +6,7 @@ package builtins import ( "fmt" - jsonpatch "github.com/evanphx/json-patch" + jsonpatch "gopkg.in/evanphx/json-patch.v4" "sigs.k8s.io/kustomize/api/filters/patchjson6902" "sigs.k8s.io/kustomize/api/ifc" "sigs.k8s.io/kustomize/api/resmap" diff --git 
a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go index 45be1ef252..8e6eb41128 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go @@ -7,104 +7,123 @@ import ( "fmt" "strings" - jsonpatch "github.com/evanphx/json-patch" + jsonpatch "gopkg.in/evanphx/json-patch.v4" "sigs.k8s.io/kustomize/api/filters/patchjson6902" "sigs.k8s.io/kustomize/api/resmap" "sigs.k8s.io/kustomize/api/resource" "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/errors" "sigs.k8s.io/kustomize/kyaml/kio/kioutil" "sigs.k8s.io/yaml" ) type PatchTransformerPlugin struct { - loadedPatch *resource.Resource - decodedPatch jsonpatch.Patch - Path string `json:"path,omitempty" yaml:"path,omitempty"` - Patch string `json:"patch,omitempty" yaml:"patch,omitempty"` - Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"` - Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty"` + smPatches []*resource.Resource // strategic-merge patches + jsonPatches jsonpatch.Patch // json6902 patch + // patchText is pure patch text created by Path or Patch + patchText string + // patchSource is patch source message + patchSource string + Path string `json:"path,omitempty" yaml:"path,omitempty"` + Patch string `json:"patch,omitempty" yaml:"patch,omitempty"` + Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"` + Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty"` } -func (p *PatchTransformerPlugin) Config( - h *resmap.PluginHelpers, c []byte) error { - err := yaml.Unmarshal(c, p) - if err != nil { +func (p *PatchTransformerPlugin) Config(h *resmap.PluginHelpers, c []byte) error { + if err := yaml.Unmarshal(c, p); err != nil { return err } + p.Patch = strings.TrimSpace(p.Patch) - if p.Patch == "" && p.Path == "" { - return fmt.Errorf( - "must specify one of patch and path in\n%s", string(c)) - } - if p.Patch != "" && p.Path != "" { - return fmt.Errorf( - "patch and path can't be set at the same time\n%s", string(c)) - } - if p.Path != "" { - loaded, loadErr := h.Loader().Load(p.Path) - if loadErr != nil { - return loadErr + switch { + case p.Patch == "" && p.Path == "": + return fmt.Errorf("must specify one of patch and path in\n%s", string(c)) + case p.Patch != "" && p.Path != "": + return fmt.Errorf("patch and path can't be set at the same time\n%s", string(c)) + case p.Patch != "": + p.patchText = p.Patch + p.patchSource = fmt.Sprintf("[patch: %q]", p.patchText) + case p.Path != "": + loaded, err := h.Loader().Load(p.Path) + if err != nil { + return fmt.Errorf("failed to get the patch file from path(%s): %w", p.Path, err) } - p.Patch = string(loaded) + p.patchText = string(loaded) + p.patchSource = fmt.Sprintf("[path: %q]", p.Path) } - patchSM, errSM := h.ResmapFactory().RF().FromBytes([]byte(p.Patch)) - patchJson, errJson := jsonPatchFromBytes([]byte(p.Patch)) + patchesSM, errSM := h.ResmapFactory().RF().SliceFromBytes([]byte(p.patchText)) + patchesJson, errJson := jsonPatchFromBytes([]byte(p.patchText)) + if (errSM == nil && errJson == nil) || - (patchSM != nil && patchJson != nil) { + (patchesSM != nil && patchesJson != nil) { return fmt.Errorf( - "illegally qualifies as both an SM and JSON patch: [%v]", - p.Patch) + "illegally qualifies as both an SM and JSON patch: %s", + p.patchSource) } if errSM != nil && errJson != nil { return fmt.Errorf( - 
"unable to parse SM or JSON patch from [%v]", p.Patch) + "unable to parse SM or JSON patch from %s", p.patchSource) } if errSM == nil { - p.loadedPatch = patchSM - if p.Options["allowNameChange"] { - p.loadedPatch.AllowNameChange() - } - if p.Options["allowKindChange"] { - p.loadedPatch.AllowKindChange() + p.smPatches = patchesSM + for _, loadedPatch := range p.smPatches { + if p.Options["allowNameChange"] { + loadedPatch.AllowNameChange() + } + if p.Options["allowKindChange"] { + loadedPatch.AllowKindChange() + } } } else { - p.decodedPatch = patchJson + p.jsonPatches = patchesJson } return nil } func (p *PatchTransformerPlugin) Transform(m resmap.ResMap) error { - if p.loadedPatch == nil { - return p.transformJson6902(m, p.decodedPatch) + if p.smPatches != nil { + return p.transformStrategicMerge(m) } - // The patch was a strategic merge patch - return p.transformStrategicMerge(m, p.loadedPatch) + return p.transformJson6902(m) } -// transformStrategicMerge applies the provided strategic merge patch -// to all the resources in the ResMap that match either the Target or -// the identifier of the patch. -func (p *PatchTransformerPlugin) transformStrategicMerge(m resmap.ResMap, patch *resource.Resource) error { - if p.Target == nil { - target, err := m.GetById(patch.OrgId()) +// transformStrategicMerge applies each loaded strategic merge patch +// to the resource in the ResMap that matches the identifier of the patch. +// If only one patch is specified, the Target can be used instead. +func (p *PatchTransformerPlugin) transformStrategicMerge(m resmap.ResMap) error { + if p.Target != nil { + if len(p.smPatches) > 1 { + // detail: https://github.com/kubernetes-sigs/kustomize/issues/5049#issuecomment-1440604403 + return fmt.Errorf("Multiple Strategic-Merge Patches in one `patches` entry is not allowed to set `patches.target` field: %s", p.patchSource) + } + + // single patch + patch := p.smPatches[0] + selected, err := m.Select(*p.Target) if err != nil { - return err + return fmt.Errorf("unable to find patch target %q in `resources`: %w", p.Target, err) } - return target.ApplySmPatch(patch) + return errors.Wrap(m.ApplySmPatch(resource.MakeIdSet(selected), patch)) } - selected, err := m.Select(*p.Target) - if err != nil { - return err + + for _, patch := range p.smPatches { + target, err := m.GetById(patch.OrgId()) + if err != nil { + return fmt.Errorf("no resource matches strategic merge patch %q: %w", patch.OrgId(), err) + } + if err := target.ApplySmPatch(patch); err != nil { + return errors.Wrap(err) + } } - return m.ApplySmPatch(resource.MakeIdSet(selected), patch) + return nil } -// transformJson6902 applies the provided json6902 patch -// to all the resources in the ResMap that match the Target. -func (p *PatchTransformerPlugin) transformJson6902(m resmap.ResMap, patch jsonpatch.Patch) error { +// transformJson6902 applies json6902 Patch to all the resources in the ResMap that match Target. 
+func (p *PatchTransformerPlugin) transformJson6902(m resmap.ResMap) error { if p.Target == nil { - return fmt.Errorf("must specify a target for patch %s", p.Patch) + return fmt.Errorf("must specify a target for JSON patch %s", p.patchSource) } resources, err := m.Select(*p.Target) if err != nil { @@ -114,7 +133,7 @@ func (p *PatchTransformerPlugin) transformJson6902(m resmap.ResMap, patch jsonpa res.StorePreviousId() internalAnnotations := kioutil.GetInternalAnnotations(&res.RNode) err = res.ApplyFilter(patchjson6902.Filter{ - Patch: p.Patch, + Patch: p.patchText, }) if err != nil { return err @@ -129,16 +148,17 @@ func (p *PatchTransformerPlugin) transformJson6902(m resmap.ResMap, patch jsonpa return nil } -// jsonPatchFromBytes loads a Json 6902 patch from -// a bytes input -func jsonPatchFromBytes( - in []byte) (jsonpatch.Patch, error) { +// jsonPatchFromBytes loads a Json 6902 patch from a bytes input +func jsonPatchFromBytes(in []byte) (jsonpatch.Patch, error) { ops := string(in) if ops == "" { return nil, fmt.Errorf("empty json patch operations") } if ops[0] != '[' { + // TODO(5049): + // In the case of multiple yaml documents, return error instead of ignoring all but first. + // Details: https://github.com/kubernetes-sigs/kustomize/pull/5194#discussion_r1256686728 jsonOps, err := yaml.YAMLToJSON(in) if err != nil { return nil, err diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go index 687f21a608..c50f627fee 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go @@ -74,34 +74,16 @@ func (p *SortOrderTransformerPlugin) Transform(m resmap.ResMap) (err error) { // Sort if p.SortOptions.Order == types.LegacySortOrder { - s := newLegacyIDSorter(m.AllIds(), p.SortOptions.LegacySortOptions) + s := newLegacyIDSorter(m.Resources(), p.SortOptions.LegacySortOptions) sort.Sort(s) - err = applyOrdering(m, s.resids) - if err != nil { - return err - } - } - return nil -} -// applyOrdering takes resources (given in ResMap) and a desired ordering given -// as a sequence of ResIds, and updates the ResMap's resources to match the -// ordering. -func applyOrdering(m resmap.ResMap, ordering []resid.ResId) error { - var err error - resources := make([]*resource.Resource, m.Size()) - // Clear and refill with the correct order - for i, id := range ordering { - resources[i], err = m.GetByCurrentId(id) - if err != nil { - return errors.WrapPrefixf(err, "expected match for sorting") - } - } - m.Clear() - for _, r := range resources { - err = m.Append(r) - if err != nil { - return errors.WrapPrefixf(err, "SortOrderTransformer: Failed to append to resources") + // Clear the map and re-add the resources in the sorted order. + m.Clear() + for _, r := range s.resources { + err := m.Append(r) + if err != nil { + return errors.WrapPrefixf(err, "SortOrderTransformer: Failed to append to resources") + } } } return nil @@ -117,12 +99,17 @@ func applyOrdering(m resmap.ResMap, ordering []resid.ResId) error { type legacyIDSorter struct { // resids only stores the metadata of the object. This is an optimization as // it's expensive to compute these again and again during ordering. - resids []resid.ResId + resids []resid.ResId + // Initially, we sorted the metadata (ResId) of each object and then called GetByCurrentId on each to construct the final list. 
+ // The problem is that GetByCurrentId is inefficient and does a linear scan in a list every time we do that. + // So instead, we sort resources alongside the ResIds. + resources []*resource.Resource + typeOrders map[string]int } func newLegacyIDSorter( - resids []resid.ResId, + resources []*resource.Resource, options *types.LegacySortOptions) *legacyIDSorter { // Precalculate a resource ranking based on the priority lists. var typeOrders = func() map[string]int { @@ -135,10 +122,13 @@ func newLegacyIDSorter( } return m }() - return &legacyIDSorter{ - resids: resids, - typeOrders: typeOrders, + + ret := &legacyIDSorter{typeOrders: typeOrders} + for _, res := range resources { + ret.resids = append(ret.resids, res.CurId()) + ret.resources = append(ret.resources, res) } + return ret } var _ sort.Interface = legacyIDSorter{} @@ -146,6 +136,7 @@ var _ sort.Interface = legacyIDSorter{} func (a legacyIDSorter) Len() int { return len(a.resids) } func (a legacyIDSorter) Swap(i, j int) { a.resids[i], a.resids[j] = a.resids[j], a.resids[i] + a.resources[i], a.resources[j] = a.resources[j], a.resources[i] } func (a legacyIDSorter) Less(i, j int) bool { if !a.resids[i].Gvk.Equals(a.resids[j].Gvk) { diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go b/vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go index d15fa75903..2098cdd31e 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go @@ -22,10 +22,16 @@ func ClonerUsingGitExec(repoSpec *RepoSpec) error { if err = r.run("init"); err != nil { return err } + // git relative submodule need origin, see https://github.com/kubernetes-sigs/kustomize/issues/5131 + if err = r.run("remote", "add", "origin", repoSpec.CloneSpec()); err != nil { + return err + } ref := "HEAD" if repoSpec.Ref != "" { ref = repoSpec.Ref } + // we use repoSpec.CloneSpec() instead of origin because on error, + // the prior prints the actual repo url for the user. 
if err = r.run("fetch", "--depth=1", repoSpec.CloneSpec(), ref); err != nil { return err } diff --git a/vendor/sigs.k8s.io/kustomize/api/image/image.go b/vendor/sigs.k8s.io/kustomize/api/internal/image/image.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/image/image.go rename to vendor/sigs.k8s.io/kustomize/api/internal/image/image.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonannotations.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/commonannotations.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonannotations.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/commonannotations.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonlabels.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/commonlabels.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonlabels.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/commonlabels.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/defaultconfig.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/defaultconfig.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/defaultconfig.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/defaultconfig.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/doc.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/doc.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/doc.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/doc.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/images.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/images.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/images.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/images.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/metadatalabels.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/metadatalabels.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/metadatalabels.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/metadatalabels.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/nameprefix.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/nameprefix.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/nameprefix.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/nameprefix.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namereference.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/namereference.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namereference.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/namereference.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namespace.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/namespace.go 
similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namespace.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/namespace.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namesuffix.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/namesuffix.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namesuffix.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/namesuffix.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/replicas.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/replicas.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/replicas.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/replicas.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/templatelabels.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/templatelabels.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/templatelabels.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/templatelabels.go diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/varreference.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/varreference.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/varreference.go rename to vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/varreference.go diff --git a/vendor/sigs.k8s.io/kustomize/api/loader/errors.go b/vendor/sigs.k8s.io/kustomize/api/internal/loader/errors.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/loader/errors.go rename to vendor/sigs.k8s.io/kustomize/api/internal/loader/errors.go diff --git a/vendor/sigs.k8s.io/kustomize/api/loader/fileloader.go b/vendor/sigs.k8s.io/kustomize/api/internal/loader/fileloader.go similarity index 75% rename from vendor/sigs.k8s.io/kustomize/api/loader/fileloader.go rename to vendor/sigs.k8s.io/kustomize/api/internal/loader/fileloader.go index 672ca29b3c..6ecc9fcefa 100644 --- a/vendor/sigs.k8s.io/kustomize/api/loader/fileloader.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/loader/fileloader.go @@ -25,7 +25,7 @@ func IsRemoteFile(path string) bool { return err == nil && (u.Scheme == "http" || u.Scheme == "https") } -// fileLoader is a kustomization's interface to files. +// FileLoader is a kustomization's interface to files. // // The directory in which a kustomization file sits // is referred to below as the kustomization's _root_. @@ -38,49 +38,48 @@ func IsRemoteFile(path string) bool { // // * supplemental data paths // -// `Load` is used to visit these paths. +// `Load` is used to visit these paths. // -// These paths refer to resources, patches, -// data for ConfigMaps and Secrets, etc. +// These paths refer to resources, patches, +// data for ConfigMaps and Secrets, etc. // -// The loadRestrictor may disallow certain paths -// or classes of paths. +// The loadRestrictor may disallow certain paths +// or classes of paths. // // * bases (other kustomizations) // -// `New` is used to load bases. +// `New` is used to load bases. // -// A base can be either a remote git repo URL, or -// a directory specified relative to the current -// root. 
In the former case, the repo is locally -// cloned, and the new loader is rooted on a path -// in that clone. +// A base can be either a remote git repo URL, or +// a directory specified relative to the current +// root. In the former case, the repo is locally +// cloned, and the new loader is rooted on a path +// in that clone. // -// As loaders create new loaders, a root history -// is established, and used to disallow: +// As loaders create new loaders, a root history +// is established, and used to disallow: // -// - A base that is a repository that, in turn, -// specifies a base repository seen previously -// in the loading stack (a cycle). +// - A base that is a repository that, in turn, +// specifies a base repository seen previously +// in the loading stack (a cycle). // -// - An overlay depending on a base positioned at -// or above it. I.e. '../foo' is OK, but '.', -// '..', '../..', etc. are disallowed. Allowing -// such a base has no advantages and encourages -// cycles, particularly if some future change -// were to introduce globbing to file -// specifications in the kustomization file. +// - An overlay depending on a base positioned at +// or above it. I.e. '../foo' is OK, but '.', +// '..', '../..', etc. are disallowed. Allowing +// such a base has no advantages and encourages +// cycles, particularly if some future change +// were to introduce globbing to file +// specifications in the kustomization file. // // These restrictions assure that kustomizations // are self-contained and relocatable, and impose // some safety when relying on remote kustomizations, // e.g. a remotely loaded ConfigMap generator specified // to read from /etc/passwd will fail. -// -type fileLoader struct { +type FileLoader struct { // Loader that spawned this loader. // Used to avoid cycles. - referrer *fileLoader + referrer *FileLoader // An absolute, cleaned path to a directory. // The Load function will read non-absolute @@ -107,23 +106,9 @@ type fileLoader struct { cleaner func() error } -// NewFileLoaderAtCwd returns a loader that loads from PWD. -// A convenience for kustomize edit commands. -func NewFileLoaderAtCwd(fSys filesys.FileSystem) *fileLoader { - return newLoaderOrDie( - RestrictionRootOnly, fSys, filesys.SelfDir) -} - -// NewFileLoaderAtRoot returns a loader that loads from "/". -// A convenience for tests. -func NewFileLoaderAtRoot(fSys filesys.FileSystem) *fileLoader { - return newLoaderOrDie( - RestrictionRootOnly, fSys, filesys.Separator) -} - // Repo returns the absolute path to the repo that contains Root if this fileLoader was created from a url // or the empty string otherwise. -func (fl *fileLoader) Repo() string { +func (fl *FileLoader) Repo() string { if fl.repoSpec != nil { return fl.repoSpec.Dir.String() } @@ -132,13 +117,13 @@ func (fl *fileLoader) Repo() string { // Root returns the absolute path that is prepended to any // relative paths used in Load. -func (fl *fileLoader) Root() string { +func (fl *FileLoader) Root() string { return fl.root.String() } -func newLoaderOrDie( +func NewLoaderOrDie( lr LoadRestrictorFunc, - fSys filesys.FileSystem, path string) *fileLoader { + fSys filesys.FileSystem, path string) *FileLoader { root, err := filesys.ConfirmDir(fSys, path) if err != nil { log.Fatalf("unable to make loader at '%s'; %v", path, err) @@ -147,12 +132,12 @@ func newLoaderOrDie( lr, root, fSys, nil, git.ClonerUsingGitExec) } -// newLoaderAtConfirmedDir returns a new fileLoader with given root. 
+// newLoaderAtConfirmedDir returns a new FileLoader with given root. func newLoaderAtConfirmedDir( lr LoadRestrictorFunc, root filesys.ConfirmedDir, fSys filesys.FileSystem, - referrer *fileLoader, cloner git.Cloner) *fileLoader { - return &fileLoader{ + referrer *FileLoader, cloner git.Cloner) *FileLoader { + return &FileLoader{ loadRestrictor: lr, root: root, referrer: referrer, @@ -164,7 +149,7 @@ func newLoaderAtConfirmedDir( // New returns a new Loader, rooted relative to current loader, // or rooted in a temp directory holding a git repo clone. -func (fl *fileLoader) New(path string) (ifc.Loader, error) { +func (fl *FileLoader) New(path string) (ifc.Loader, error) { if path == "" { return nil, errors.Errorf("new root cannot be empty") } @@ -200,7 +185,7 @@ func (fl *fileLoader) New(path string) (ifc.Loader, error) { // directory holding a cloned git repo. func newLoaderAtGitClone( repoSpec *git.RepoSpec, fSys filesys.FileSystem, - referrer *fileLoader, cloner git.Cloner) (ifc.Loader, error) { + referrer *FileLoader, cloner git.Cloner) (ifc.Loader, error) { cleaner := repoSpec.Cleaner(fSys) err := cloner(repoSpec) if err != nil { @@ -229,7 +214,7 @@ func newLoaderAtGitClone( return nil, fmt.Errorf("%q refers to directory outside of repo %q", repoSpec.AbsPath(), repoSpec.CloneDir()) } - return &fileLoader{ + return &FileLoader{ // Clones never allowed to escape root. loadRestrictor: RestrictionRootOnly, root: root, @@ -241,7 +226,7 @@ func newLoaderAtGitClone( }, nil } -func (fl *fileLoader) errIfGitContainmentViolation( +func (fl *FileLoader) errIfGitContainmentViolation( base filesys.ConfirmedDir) error { containingRepo := fl.containingRepo() if containingRepo == nil { @@ -259,7 +244,7 @@ func (fl *fileLoader) errIfGitContainmentViolation( // Looks back through referrers for a git repo, returning nil // if none found. -func (fl *fileLoader) containingRepo() *git.RepoSpec { +func (fl *FileLoader) containingRepo() *git.RepoSpec { if fl.repoSpec != nil { return fl.repoSpec } @@ -271,7 +256,7 @@ func (fl *fileLoader) containingRepo() *git.RepoSpec { // errIfArgEqualOrHigher tests whether the argument, // is equal to or above the root of any ancestor. -func (fl *fileLoader) errIfArgEqualOrHigher( +func (fl *FileLoader) errIfArgEqualOrHigher( candidateRoot filesys.ConfirmedDir) error { if fl.root.HasPrefix(candidateRoot) { return fmt.Errorf( @@ -288,7 +273,7 @@ func (fl *fileLoader) errIfArgEqualOrHigher( // I.e. Allow a distinction between git URI with // path foo and tag bar and a git URI with the same // path but a different tag? -func (fl *fileLoader) errIfRepoCycle(newRepoSpec *git.RepoSpec) error { +func (fl *FileLoader) errIfRepoCycle(newRepoSpec *git.RepoSpec) error { // TODO(monopole): Use parsed data instead of Raw(). if fl.repoSpec != nil && strings.HasPrefix(fl.repoSpec.Raw(), newRepoSpec.Raw()) { @@ -305,7 +290,7 @@ func (fl *fileLoader) errIfRepoCycle(newRepoSpec *git.RepoSpec) error { // Load returns the content of file at the given path, // else an error. Relative paths are taken relative // to the root. 
-func (fl *fileLoader) Load(path string) ([]byte, error) { +func (fl *FileLoader) Load(path string) ([]byte, error) { if IsRemoteFile(path) { return fl.httpClientGetContent(path) } @@ -319,7 +304,7 @@ func (fl *fileLoader) Load(path string) ([]byte, error) { return fl.fSys.ReadFile(path) } -func (fl *fileLoader) httpClientGetContent(path string) ([]byte, error) { +func (fl *FileLoader) httpClientGetContent(path string) ([]byte, error) { var hc *http.Client if fl.http != nil { hc = fl.http @@ -344,6 +329,6 @@ func (fl *fileLoader) httpClientGetContent(path string) ([]byte, error) { } // Cleanup runs the cleaner. -func (fl *fileLoader) Cleanup() error { +func (fl *FileLoader) Cleanup() error { return fl.cleaner() } diff --git a/vendor/sigs.k8s.io/kustomize/api/loader/loader.go b/vendor/sigs.k8s.io/kustomize/api/internal/loader/loader.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/loader/loader.go rename to vendor/sigs.k8s.io/kustomize/api/internal/loader/loader.go diff --git a/vendor/sigs.k8s.io/kustomize/api/loader/loadrestrictions.go b/vendor/sigs.k8s.io/kustomize/api/internal/loader/loadrestrictions.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/api/loader/loadrestrictions.go rename to vendor/sigs.k8s.io/kustomize/api/internal/loader/loadrestrictions.go diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go index bf5e3f8a32..434941b6e0 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go @@ -33,7 +33,7 @@ func loadDefaultConfig( // makeTransformerConfigFromBytes returns a TransformerConfig object from bytes func makeTransformerConfigFromBytes(data []byte) (*TransformerConfig, error) { var t TransformerConfig - err := yaml.Unmarshal(data, &t) + err := yaml.UnmarshalStrict(data, &t) if err != nil { return nil, err } diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go index 36ef42c272..eb0f307338 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go @@ -47,6 +47,8 @@ type NameBackReferences struct { // TODO: rename json 'fieldSpecs' to 'referrers' for clarity. // This will, however, break anyone using a custom config. 
Referrers types.FsSlice `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` + + // Note: If any new pointer based members are added, DeepCopy needs to be updated } func (n NameBackReferences) String() string { @@ -66,6 +68,17 @@ func (s nbrSlice) Less(i, j int) bool { return s[i].Gvk.IsLessThan(s[j].Gvk) } +// DeepCopy returns a new copy of nbrSlice +func (s nbrSlice) DeepCopy() nbrSlice { + ret := make(nbrSlice, len(s)) + copy(ret, s) + for i, slice := range ret { + ret[i].Referrers = slice.Referrers.DeepCopy() + } + + return ret +} + func (s nbrSlice) mergeAll(o nbrSlice) (result nbrSlice, err error) { result = s for _, r := range o { diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go index 69b8bd4fb2..c539c290d5 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go @@ -6,19 +6,22 @@ package builtinconfig import ( "log" "sort" + "sync" "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts" + "sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts" "sigs.k8s.io/kustomize/api/types" "sigs.k8s.io/kustomize/kyaml/errors" ) // TransformerConfig holds the data needed to perform transformations. type TransformerConfig struct { + // if any fields are added, update the DeepCopy implementation NamePrefix types.FsSlice `json:"namePrefix,omitempty" yaml:"namePrefix,omitempty"` NameSuffix types.FsSlice `json:"nameSuffix,omitempty" yaml:"nameSuffix,omitempty"` NameSpace types.FsSlice `json:"namespace,omitempty" yaml:"namespace,omitempty"` CommonLabels types.FsSlice `json:"commonLabels,omitempty" yaml:"commonLabels,omitempty"` + Labels types.FsSlice `json:"labels,omitempty" yaml:"labels,omitempty"` TemplateLabels types.FsSlice `json:"templateLabels,omitempty" yaml:"templateLabels,omitempty"` CommonAnnotations types.FsSlice `json:"commonAnnotations,omitempty" yaml:"commonAnnotations,omitempty"` NameReference nbrSlice `json:"nameReference,omitempty" yaml:"nameReference,omitempty"` @@ -32,14 +35,44 @@ func MakeEmptyConfig() *TransformerConfig { return &TransformerConfig{} } +// DeepCopy returns a new copy of TransformerConfig +func (t *TransformerConfig) DeepCopy() *TransformerConfig { + return &TransformerConfig{ + NamePrefix: t.NamePrefix.DeepCopy(), + NameSuffix: t.NameSuffix.DeepCopy(), + NameSpace: t.NameSpace.DeepCopy(), + CommonLabels: t.CommonLabels.DeepCopy(), + Labels: t.Labels.DeepCopy(), + TemplateLabels: t.TemplateLabels.DeepCopy(), + CommonAnnotations: t.CommonAnnotations.DeepCopy(), + NameReference: t.NameReference.DeepCopy(), + VarReference: t.VarReference.DeepCopy(), + Images: t.Images.DeepCopy(), + Replicas: t.Replicas.DeepCopy(), + } +} + +// the default transformer config is initialized by MakeDefaultConfig, +// and must only be accessed via that function. +var ( + initDefaultConfig sync.Once //nolint:gochecknoglobals + defaultConfig *TransformerConfig //nolint:gochecknoglobals +) + // MakeDefaultConfig returns a default TransformerConfig. 
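(Editor's aside, not part of the vendored diff.) The transformerconfig.go hunks around this point add DeepCopy helpers and, in the MakeDefaultConfig body just below, memoize the expensive parse of the built-in field specs behind a sync.Once while handing every caller a copy so the cached value cannot be mutated. A generic sketch of that pattern; the names here (config, defaultConfig, deepCopy) are illustrative, not kustomize's:

package main

import (
	"fmt"
	"sync"
)

type config struct{ fields []string }

func (c *config) deepCopy() *config {
	out := &config{fields: make([]string, len(c.fields))}
	copy(out.fields, c.fields)
	return out
}

var (
	initOnce sync.Once
	cached   *config
)

// defaultConfig builds the expensive default exactly once and returns a copy,
// so callers may append or sort without corrupting the shared cache.
func defaultConfig() *config {
	initOnce.Do(func() {
		cached = &config{fields: []string{"namePrefix", "commonLabels"}}
	})
	return cached.deepCopy()
}

func main() {
	c := defaultConfig()
	c.fields = append(c.fields, "mutated")
	fmt.Println(len(defaultConfig().fields)) // still 2: the cached value is untouched
}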
func MakeDefaultConfig() *TransformerConfig { - c, err := makeTransformerConfigFromBytes( - builtinpluginconsts.GetDefaultFieldSpecs()) - if err != nil { - log.Fatalf("Unable to make default transformconfig: %v", err) - } - return c + // parsing is expensive when having a large tree with many kustomization modules, so only do it once + initDefaultConfig.Do(func() { + var err error + defaultConfig, err = makeTransformerConfigFromBytes( + builtinpluginconsts.GetDefaultFieldSpecs()) + if err != nil { + log.Fatalf("Unable to make default transformconfig: %v", err) + } + }) + + // return a copy to avoid any mutations to protect the reference copy + return defaultConfig.DeepCopy() } // MakeTransformerConfig returns a merger of custom config, @@ -63,6 +96,7 @@ func (t *TransformerConfig) sortFields() { sort.Sort(t.NameSuffix) sort.Sort(t.NameSpace) sort.Sort(t.CommonLabels) + sort.Sort(t.Labels) sort.Sort(t.TemplateLabels) sort.Sort(t.CommonAnnotations) sort.Sort(t.NameReference) @@ -83,12 +117,18 @@ func (t *TransformerConfig) AddSuffixFieldSpec(fs types.FieldSpec) (err error) { return err } -// AddLabelFieldSpec adds a FieldSpec to CommonLabels -func (t *TransformerConfig) AddLabelFieldSpec(fs types.FieldSpec) (err error) { +// AddCommonLabelsFieldSpec adds a FieldSpec to CommonLabels +func (t *TransformerConfig) AddCommonLabelsFieldSpec(fs types.FieldSpec) (err error) { t.CommonLabels, err = t.CommonLabels.MergeOne(fs) return err } +// AddLabelsFieldSpec adds a FieldSpec to Labels +func (t *TransformerConfig) AddLabelsFieldSpec(fs types.FieldSpec) (err error) { + t.Labels, err = t.Labels.MergeOne(fs) + return err //nolint:wrapcheck +} + // AddAnnotationFieldSpec adds a FieldSpec to CommonAnnotations func (t *TransformerConfig) AddAnnotationFieldSpec(fs types.FieldSpec) (err error) { t.CommonAnnotations, err = t.CommonAnnotations.MergeOne(fs) @@ -131,6 +171,10 @@ func (t *TransformerConfig) Merge(input *TransformerConfig) ( if err != nil { return nil, errors.WrapPrefixf(err, "failed to merge CommonLabels fieldSpec") } + merged.Labels, err = t.Labels.MergeAll(input.Labels) + if err != nil { + return nil, errors.WrapPrefixf(err, "failed to merge Labels fieldSpec") + } merged.TemplateLabels, err = t.TemplateLabels.MergeAll(input.TemplateLabels) if err != nil { return nil, errors.WrapPrefixf(err, "failed to merge TemplateLabels fieldSpec") diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go index 001731f780..71c52884f0 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go @@ -6,6 +6,7 @@ package execplugin import ( "bytes" "fmt" + "log" "os" "os/exec" "runtime" @@ -21,6 +22,7 @@ import ( const ( tmpConfigFilePrefix = "kust-plugin-config-" + maxArgStringLength = 131071 ) // ExecPlugin record the name and args of an executable @@ -169,23 +171,35 @@ func (p *ExecPlugin) invokePlugin(input []byte) ([]byte, error) { p.path, append([]string{f.Name()}, p.args...)...) 
cmd.Env = p.getEnv() cmd.Stdin = bytes.NewReader(input) - cmd.Stderr = os.Stderr + var stdErr bytes.Buffer + cmd.Stderr = &stdErr if _, err := os.Stat(p.h.Loader().Root()); err == nil { cmd.Dir = p.h.Loader().Root() } result, err := cmd.Output() if err != nil { return nil, errors.WrapPrefixf( - err, "failure in plugin configured via %s; %v", - f.Name(), err.Error()) + fmt.Errorf("failure in plugin configured via %s; %w", + f.Name(), err), stdErr.String()) } return result, os.Remove(f.Name()) } func (p *ExecPlugin) getEnv() []string { env := os.Environ() - env = append(env, - "KUSTOMIZE_PLUGIN_CONFIG_STRING="+string(p.cfg), - "KUSTOMIZE_PLUGIN_CONFIG_ROOT="+p.h.Loader().Root()) + pluginConfigString := "KUSTOMIZE_PLUGIN_CONFIG_STRING=" + string(p.cfg) + if len(pluginConfigString) <= maxArgStringLength { + env = append(env, pluginConfigString) + } else { + log.Printf("KUSTOMIZE_PLUGIN_CONFIG_STRING exceeds hard limit of %d characters, the environment variable "+ + "will be omitted", maxArgStringLength) + } + pluginConfigRoot := "KUSTOMIZE_PLUGIN_CONFIG_ROOT=" + p.h.Loader().Root() + if len(pluginConfigRoot) <= maxArgStringLength { + env = append(env, pluginConfigRoot) + } else { + log.Printf("KUSTOMIZE_PLUGIN_CONFIG_ROOT exceeds hard limit of %d characters, the environment variable "+ + "will be omitted", maxArgStringLength) + } return env } diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/load_go_plugin.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/load_go_plugin.go new file mode 100644 index 0000000000..3757cfc646 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/load_go_plugin.go @@ -0,0 +1,62 @@ +// Copyright 2024 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 +//go:build !kustomize_disable_go_plugin_support + +package loader + +import ( + "fmt" + "log" + "plugin" + "reflect" + + "sigs.k8s.io/kustomize/api/internal/plugins/utils" + "sigs.k8s.io/kustomize/api/konfig" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/resid" +) + +// registry is a means to avoid trying to load the same .so file +// into memory more than once, which results in an error. +// Each test makes its own loader, and tries to load its own plugins, +// but the loaded .so files are in shared memory, so one will get +// "this plugin already loaded" errors if the registry is maintained +// as a Loader instance variable. So make it a package variable. 
+var registry = make(map[string]resmap.Configurable) //nolint:gochecknoglobals + +func copyPlugin(c resmap.Configurable) resmap.Configurable { + indirect := reflect.Indirect(reflect.ValueOf(c)) + newIndirect := reflect.New(indirect.Type()) + newIndirect.Elem().Set(reflect.ValueOf(indirect.Interface())) + newNamed := newIndirect.Interface() + return newNamed.(resmap.Configurable) //nolint:forcetypeassert +} + +func (l *Loader) loadGoPlugin(id resid.ResId, absPath string) (resmap.Configurable, error) { + regId := relativePluginPath(id) + if c, ok := registry[regId]; ok { + return copyPlugin(c), nil + } + if !utils.FileExists(absPath) { + return nil, fmt.Errorf( + "expected file with Go object code at: %s", absPath) + } + log.Printf("Attempting plugin load from %q", absPath) + p, err := plugin.Open(absPath) + if err != nil { + return nil, errors.WrapPrefixf(err, "plugin %s fails to load", absPath) + } + symbol, err := p.Lookup(konfig.PluginSymbol) + if err != nil { + return nil, errors.WrapPrefixf( + err, "plugin %s doesn't have symbol %s", + regId, konfig.PluginSymbol) + } + c, ok := symbol.(resmap.Configurable) + if !ok { + return nil, fmt.Errorf("plugin %q not configurable", regId) + } + registry[regId] = c + return copyPlugin(c), nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/load_go_plugin_disabled.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/load_go_plugin_disabled.go new file mode 100644 index 0000000000..5531b7967f --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/load_go_plugin_disabled.go @@ -0,0 +1,25 @@ +// Copyright 2024 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// The build tag "kustomize_disable_go_plugin_support" is used to deactivate the +// kustomize API's dependency on the "plugins" package. This is beneficial for +// applications that need to embed it but do not have requirements for dynamic +// Go plugins. +// Including plugins as a dependency can lead to an increase in binary size due +// to the population of ELF's sections such as .dynsym and .dynstr. +// By utilizing this flag, applications have the flexibility to exclude the +// import if they do not require support for dynamic Go plugins. 
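copyPlugin in the new load_go_plugin.go uses reflection to hand each caller a fresh instance of the cached plugin: dereference the pointer, allocate a new value of the same concrete type, copy the fields, and return it behind the interface. A self-contained sketch of that trick against a stand-in Configurable interface (not the real resmap API):

```go
package main

import (
	"fmt"
	"reflect"
)

// Configurable stands in for resmap.Configurable; one method is enough for the sketch.
type Configurable interface {
	Config(cfg string) error
}

type examplePlugin struct{ cfg string }

func (p *examplePlugin) Config(cfg string) error { p.cfg = cfg; return nil }

// copyPlugin returns a new instance of the same concrete type with the same
// field values, so plugins cached in a registry are never shared between callers.
func copyPlugin(c Configurable) Configurable {
	indirect := reflect.Indirect(reflect.ValueOf(c))          // follow the pointer
	fresh := reflect.New(indirect.Type())                     // allocate a new value of that type
	fresh.Elem().Set(reflect.ValueOf(indirect.Interface()))   // copy the fields across
	return fresh.Interface().(Configurable)                   // hand it back as the interface
}

func main() {
	registry := map[string]Configurable{"demo": &examplePlugin{}}

	a := copyPlugin(registry["demo"])
	_ = a.Config("only a's state changes")

	b := copyPlugin(registry["demo"])
	fmt.Printf("a=%+v b=%+v\n", a, b) // distinct instances, independent state
}
```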
+//go:build kustomize_disable_go_plugin_support + +package loader + +import ( + "fmt" + + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/kyaml/resid" +) + +func (l *Loader) loadGoPlugin(_ resid.ResId, _ string) (resmap.Configurable, error) { + return nil, fmt.Errorf("plugin load is disabled") +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go index 1758e5cf66..e494df767e 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go @@ -5,18 +5,14 @@ package loader import ( "fmt" - "log" "os" "path/filepath" - "plugin" - "reflect" "strings" "sigs.k8s.io/kustomize/api/ifc" "sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers" "sigs.k8s.io/kustomize/api/internal/plugins/execplugin" "sigs.k8s.io/kustomize/api/internal/plugins/fnplugin" - "sigs.k8s.io/kustomize/api/internal/plugins/utils" "sigs.k8s.io/kustomize/api/konfig" "sigs.k8s.io/kustomize/api/resmap" "sigs.k8s.io/kustomize/api/resource" @@ -287,46 +283,3 @@ func (l *Loader) loadExecOrGoPlugin(resId resid.ResId) (resmap.Configurable, err return c, nil } -// registry is a means to avoid trying to load the same .so file -// into memory more than once, which results in an error. -// Each test makes its own loader, and tries to load its own plugins, -// but the loaded .so files are in shared memory, so one will get -// "this plugin already loaded" errors if the registry is maintained -// as a Loader instance variable. So make it a package variable. -var registry = make(map[string]resmap.Configurable) - -func (l *Loader) loadGoPlugin(id resid.ResId, absPath string) (resmap.Configurable, error) { - regId := relativePluginPath(id) - if c, ok := registry[regId]; ok { - return copyPlugin(c), nil - } - if !utils.FileExists(absPath) { - return nil, fmt.Errorf( - "expected file with Go object code at: %s", absPath) - } - log.Printf("Attempting plugin load from '%s'", absPath) - p, err := plugin.Open(absPath) - if err != nil { - return nil, errors.WrapPrefixf(err, "plugin %s fails to load", absPath) - } - symbol, err := p.Lookup(konfig.PluginSymbol) - if err != nil { - return nil, errors.WrapPrefixf( - err, "plugin %s doesn't have symbol %s", - regId, konfig.PluginSymbol) - } - c, ok := symbol.(resmap.Configurable) - if !ok { - return nil, fmt.Errorf("plugin '%s' not configurable", regId) - } - registry[regId] = c - return copyPlugin(c), nil -} - -func copyPlugin(c resmap.Configurable) resmap.Configurable { - indirect := reflect.Indirect(reflect.ValueOf(c)) - newIndirect := reflect.New(indirect.Type()) - newIndirect.Elem().Set(reflect.ValueOf(indirect.Interface())) - newNamed := newIndirect.Interface() - return newNamed.(resmap.Configurable) -} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go index 6e06a14baf..44c82bc597 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go @@ -13,12 +13,12 @@ import ( "sigs.k8s.io/kustomize/api/internal/accumulator" "sigs.k8s.io/kustomize/api/internal/builtins" "sigs.k8s.io/kustomize/api/internal/kusterr" + load "sigs.k8s.io/kustomize/api/internal/loader" "sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig" "sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers" "sigs.k8s.io/kustomize/api/internal/plugins/loader" 
"sigs.k8s.io/kustomize/api/internal/utils" "sigs.k8s.io/kustomize/api/konfig" - load "sigs.k8s.io/kustomize/api/loader" "sigs.k8s.io/kustomize/api/resmap" "sigs.k8s.io/kustomize/api/resource" "sigs.k8s.io/kustomize/api/types" @@ -202,10 +202,6 @@ func (kt *KustTarget) accumulateTarget(ra *accumulator.ResAccumulator) ( if err != nil { return nil, errors.WrapPrefixf(err, "accumulating resources") } - ra, err = kt.accumulateComponents(ra, kt.kustomization.Components) - if err != nil { - return nil, errors.WrapPrefixf(err, "accumulating components") - } tConfig, err := builtinconfig.MakeTransformerConfig( kt.ldr, kt.kustomization.Configurations) if err != nil { @@ -230,6 +226,14 @@ func (kt *KustTarget) accumulateTarget(ra *accumulator.ResAccumulator) ( if err != nil { return nil, err } + + // components are expected to execute after reading resources and adding generators ,before applying transformers and validation. + // https://github.com/kubernetes-sigs/kustomize/pull/5170#discussion_r1212101287 + ra, err = kt.accumulateComponents(ra, kt.kustomization.Components) + if err != nil { + return nil, errors.WrapPrefixf(err, "accumulating components") + } + err = kt.runTransformers(ra) if err != nil { return nil, err @@ -421,7 +425,14 @@ func (kt *KustTarget) accumulateResources( } ldr, err := kt.ldr.New(path) if err != nil { - if kusterr.IsMalformedYAMLError(errF) { // Some error occurred while tyring to decode YAML file + // If accumulateFile found malformed YAML and there was a failure + // loading the resource as a base, then the resource is likely a + // file. The loader failure message is unnecessary, and could be + // confusing. Report only the file load error. + // + // However, a loader timeout implies there is a git repo at the + // path. In that case, both errors could be important. 
+ if kusterr.IsMalformedYAMLError(errF) && !utils.IsErrTimeout(err) { return nil, errF } return nil, errors.WrapPrefixf( diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go index b589961e5f..1ba028a36f 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go @@ -275,13 +275,25 @@ var transformerConfigurators = map[builtinhelpers.BuiltinPluginType]func( if len(kt.kustomization.Labels) == 0 && len(kt.kustomization.CommonLabels) == 0 { return } + + type labelStruct struct { + Labels map[string]string + FieldSpecs []types.FieldSpec + } + for _, label := range kt.kustomization.Labels { - var c struct { - Labels map[string]string - FieldSpecs []types.FieldSpec - } + var c labelStruct + c.Labels = label.Pairs fss := types.FsSlice(label.FieldSpecs) + + // merge labels specified in the label section of transformer configs + // these apply to selectors and templates + fss, err := fss.MergeAll(tc.Labels) + if err != nil { + return nil, fmt.Errorf("failed to merge labels: %w", err) + } + // merge the custom fieldSpecs with the default if label.IncludeSelectors { fss, err = fss.MergeAll(tc.CommonLabels) @@ -297,7 +309,7 @@ var transformerConfigurators = map[builtinhelpers.BuiltinPluginType]func( fss, err = fss.MergeOne(types.FieldSpec{Path: "metadata/labels", CreateIfNotPresent: true}) } if err != nil { - return nil, err + return nil, fmt.Errorf("failed to merge labels: %w", err) } c.FieldSpecs = fss p := f() @@ -307,10 +319,9 @@ var transformerConfigurators = map[builtinhelpers.BuiltinPluginType]func( } result = append(result, p) } - var c struct { - Labels map[string]string - FieldSpecs []types.FieldSpec - } + + var c labelStruct + c.Labels = kt.kustomization.CommonLabels c.FieldSpecs = tc.CommonLabels p := f() diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go b/vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go index c1761d2c6c..a0d861c7bf 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go @@ -15,11 +15,11 @@ type errTimeOut struct { cmd string } -func NewErrTimeOut(d time.Duration, c string) errTimeOut { - return errTimeOut{duration: d, cmd: c} +func NewErrTimeOut(d time.Duration, c string) *errTimeOut { + return &errTimeOut{duration: d, cmd: c} } -func (e errTimeOut) Error() string { +func (e *errTimeOut) Error() string { return fmt.Sprintf("hit %s timeout running '%s'", e.duration, e.cmd) } diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go b/vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go index 0afadd0c3f..cda53f864b 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go @@ -10,7 +10,7 @@ import ( // TimedCall runs fn, failing if it doesn't complete in the given duration. // The description is used in the timeout error message. 
func TimedCall(description string, d time.Duration, fn func() error) error { - done := make(chan error) + done := make(chan error, 1) timer := time.NewTimer(d) defer timer.Stop() go func() { done <- fn() }() diff --git a/vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go b/vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go index 8c76840545..999cbf4537 100644 --- a/vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go +++ b/vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go @@ -8,11 +8,11 @@ import ( "log" "sigs.k8s.io/kustomize/api/internal/builtins" + fLdr "sigs.k8s.io/kustomize/api/internal/loader" pLdr "sigs.k8s.io/kustomize/api/internal/plugins/loader" "sigs.k8s.io/kustomize/api/internal/target" "sigs.k8s.io/kustomize/api/internal/utils" "sigs.k8s.io/kustomize/api/konfig" - fLdr "sigs.k8s.io/kustomize/api/loader" "sigs.k8s.io/kustomize/api/provenance" "sigs.k8s.io/kustomize/api/provider" "sigs.k8s.io/kustomize/api/resmap" diff --git a/vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go b/vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go index 0e24fc36ca..c637ac2e1a 100644 --- a/vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go +++ b/vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go @@ -8,6 +8,8 @@ import ( "runtime" "runtime/debug" "strings" + + "github.com/blang/semver/v4" ) // These variables are set at build time using ldflags. @@ -62,9 +64,42 @@ func GetProvenance() Provenance { p.GitCommit = setting.Value } } + + for _, dep := range info.Deps { + if dep != nil && dep.Path == "sigs.k8s.io/kustomize/kustomize/v5" { + if dep.Version != "devel" { + continue + } + v, err := GetMostRecentTag(*dep) + if err != nil { + fmt.Printf("failed to get most recent tag for %s: %v\n", dep.Path, err) + continue + } + p.Version = v + } + } + return p } +func GetMostRecentTag(m debug.Module) (string, error) { + for m.Replace != nil { + m = *m.Replace + } + + split := strings.Split(m.Version, "-") + sv, err := semver.Parse(strings.TrimPrefix(split[0], "v")) + + if err != nil { + return "", fmt.Errorf("failed to parse version %s: %w", m.Version, err) + } + + if len(split) > 1 && sv.Patch > 0 { + sv.Patch -= 1 + } + return fmt.Sprintf("v%s", sv.FinalizeVersion()), nil +} + // Short returns the shortened provenance stamp. func (v Provenance) Short() string { return fmt.Sprintf( diff --git a/vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go b/vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go index 411e4e8138..2e34fae6a7 100644 --- a/vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go +++ b/vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go @@ -696,8 +696,7 @@ func (m *resWrangler) DeAnchor() (err error) { } // ApplySmPatch applies the patch, and errors on Id collisions. -func (m *resWrangler) ApplySmPatch( - selectedSet *resource.IdSet, patch *resource.Resource) error { +func (m *resWrangler) ApplySmPatch(selectedSet *resource.IdSet, patch *resource.Resource) error { var list []*resource.Resource for _, res := range m.rList { if selectedSet.Contains(res.CurId()) { diff --git a/vendor/sigs.k8s.io/kustomize/api/resource/factory.go b/vendor/sigs.k8s.io/kustomize/api/resource/factory.go index cbda872376..fef2f6e499 100644 --- a/vendor/sigs.k8s.io/kustomize/api/resource/factory.go +++ b/vendor/sigs.k8s.io/kustomize/api/resource/factory.go @@ -41,30 +41,36 @@ func (rf *Factory) Hasher() ifc.KustHasher { } // FromMap returns a new instance of Resource. 
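The TimedCall change above switches `done` to a buffered channel of size 1, so that when the timer wins the select the goroutine running fn can still complete its send and exit rather than blocking forever. A standalone sketch of the pattern (timedCall here is a hypothetical stand-in, not the exported helper):

```go
package main

import (
	"fmt"
	"time"
)

// timedCall runs fn and fails with a timeout error if it does not finish in d.
// The buffer of 1 lets the goroutine complete its send even after the caller
// has stopped listening, avoiding a goroutine leak.
func timedCall(description string, d time.Duration, fn func() error) error {
	done := make(chan error, 1)
	timer := time.NewTimer(d)
	defer timer.Stop()
	go func() { done <- fn() }()
	select {
	case err := <-done:
		return err
	case <-timer.C:
		return fmt.Errorf("hit %s timeout running '%s'", d, description)
	}
}

func main() {
	err := timedCall("slow task", 50*time.Millisecond, func() error {
		time.Sleep(200 * time.Millisecond)
		return nil
	})
	fmt.Println(err) // timeout error; the worker goroutine still exits cleanly
}
```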
-func (rf *Factory) FromMap(m map[string]interface{}) *Resource { - return rf.FromMapAndOption(m, nil) +func (rf *Factory) FromMap(m map[string]interface{}) (*Resource, error) { + res, err := rf.FromMapAndOption(m, nil) + if err != nil { + return nil, fmt.Errorf("failed to create resource from map: %w", err) + } + return res, nil } // FromMapWithName returns a new instance with the given "original" name. -func (rf *Factory) FromMapWithName(n string, m map[string]interface{}) *Resource { +func (rf *Factory) FromMapWithName(n string, m map[string]interface{}) (*Resource, error) { return rf.FromMapWithNamespaceAndName(resid.DefaultNamespace, n, m) } // FromMapWithNamespaceAndName returns a new instance with the given "original" namespace. -func (rf *Factory) FromMapWithNamespaceAndName(ns string, n string, m map[string]interface{}) *Resource { - r := rf.FromMapAndOption(m, nil) - return r.setPreviousId(ns, n, r.GetKind()) +func (rf *Factory) FromMapWithNamespaceAndName(ns string, n string, m map[string]interface{}) (*Resource, error) { + r, err := rf.FromMapAndOption(m, nil) + if err != nil { + return nil, fmt.Errorf("failed to create resource from map: %w", err) + } + return r.setPreviousId(ns, n, r.GetKind()), nil } // FromMapAndOption returns a new instance of Resource with given options. func (rf *Factory) FromMapAndOption( - m map[string]interface{}, args *types.GeneratorArgs) *Resource { + m map[string]interface{}, args *types.GeneratorArgs) (*Resource, error) { n, err := yaml.FromMap(m) if err != nil { - // TODO: return err instead of log. - log.Fatal(err) + return nil, fmt.Errorf("failed to convert map to YAML node: %w", err) } - return rf.makeOne(n, args) + return rf.makeOne(n, args), nil } // makeOne returns a new instance of Resource. diff --git a/vendor/sigs.k8s.io/kustomize/api/resource/resource.go b/vendor/sigs.k8s.io/kustomize/api/resource/resource.go index ae1a98be0e..9884a672c5 100644 --- a/vendor/sigs.k8s.io/kustomize/api/resource/resource.go +++ b/vendor/sigs.k8s.io/kustomize/api/resource/resource.go @@ -287,12 +287,25 @@ func (r *Resource) getCsvAnnotation(name string) []string { return strings.Split(annotations[name], ",") } -// PrefixesSuffixesEquals is conceptually doing the same task -// as OutermostPrefixSuffix but performs a deeper comparison -// of the suffix and prefix slices. -func (r *Resource) PrefixesSuffixesEquals(o ResCtx) bool { - return utils.SameEndingSubSlice(r.GetNamePrefixes(), o.GetNamePrefixes()) && - utils.SameEndingSubSlice(r.GetNameSuffixes(), o.GetNameSuffixes()) +// PrefixesSuffixesEquals is conceptually doing the same task as +// OutermostPrefixSuffix but performs a deeper comparison of the suffix and +// prefix slices. +// The allowEmpty flag determines whether an empty prefix/suffix +// should be considered a match on anything. +// This is used when filtering, starting with a coarser pass allowing empty +// matches, before requiring exact matches if there are multiple +// remaining candidates. 
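GetMostRecentTag, added to provenance.go earlier in this diff, derives the last released tag from a Go pseudo-version by parsing the semver prefix and decrementing the patch number when a pre-release suffix is present. A simplified standard-library sketch of that idea (the real code uses github.com/blang/semver/v4):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// mostRecentTag is a simplified take on GetMostRecentTag: a pseudo-version such
// as "v5.4.3-0.20240101000000-abcdef123456" points one patch release past the
// last real tag, so strip the pre-release part and decrement the patch.
func mostRecentTag(version string) (string, error) {
	base, _, isPseudo := strings.Cut(version, "-")
	parts := strings.Split(strings.TrimPrefix(base, "v"), ".")
	if len(parts) != 3 {
		return "", fmt.Errorf("unexpected version %q", version)
	}
	patch, err := strconv.Atoi(parts[2])
	if err != nil {
		return "", fmt.Errorf("failed to parse version %q: %w", version, err)
	}
	if isPseudo && patch > 0 {
		patch--
	}
	return fmt.Sprintf("v%s.%s.%d", parts[0], parts[1], patch), nil
}

func main() {
	fmt.Println(mostRecentTag("v5.4.3-0.20240101000000-abcdef123456")) // v5.4.2
	fmt.Println(mostRecentTag("v5.4.1"))                               // v5.4.1, unchanged
}
```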
+func (r *Resource) PrefixesSuffixesEquals(o ResCtx, allowEmpty bool) bool { + if allowEmpty { + eitherPrefixEmpty := len(r.GetNamePrefixes()) == 0 || len(o.GetNamePrefixes()) == 0 + eitherSuffixEmpty := len(r.GetNameSuffixes()) == 0 || len(o.GetNameSuffixes()) == 0 + + return (eitherPrefixEmpty || utils.SameEndingSubSlice(r.GetNamePrefixes(), o.GetNamePrefixes())) && + (eitherSuffixEmpty || utils.SameEndingSubSlice(r.GetNameSuffixes(), o.GetNameSuffixes())) + } else { + return utils.SameEndingSubSlice(r.GetNamePrefixes(), o.GetNamePrefixes()) && + utils.SameEndingSubSlice(r.GetNameSuffixes(), o.GetNameSuffixes()) + } } // RemoveBuildAnnotations removes annotations created by the build process. diff --git a/vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go b/vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go index 8d35795447..8b78889ea0 100644 --- a/vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go +++ b/vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go @@ -30,6 +30,8 @@ type FieldSpec struct { resid.Gvk `json:",inline,omitempty" yaml:",inline,omitempty"` Path string `json:"path,omitempty" yaml:"path,omitempty"` CreateIfNotPresent bool `json:"create,omitempty" yaml:"create,omitempty"` + + // Note: If any new pointer based members are added, FsSlice.DeepCopy needs to be updated } func (fs FieldSpec) String() string { @@ -50,6 +52,13 @@ func (s FsSlice) Less(i, j int) bool { return s[i].Gvk.IsLessThan(s[j].Gvk) } +// DeepCopy returns a new copy of FsSlice +func (s FsSlice) DeepCopy() FsSlice { + ret := make(FsSlice, len(s)) + copy(ret, s) + return ret +} + // MergeAll merges the argument into this, returning the result. // Items already present are ignored. // Items that conflict (primary key matches, but remain data differs) diff --git a/vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go b/vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go index a4145db3d8..eae221bed8 100644 --- a/vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go +++ b/vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go @@ -5,7 +5,7 @@ package types // GeneratorArgs contains arguments common to ConfigMap and Secret generators. type GeneratorArgs struct { - // Namespace for the configmap, optional + // Namespace for the resource, optional Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` // Name - actually the partial name - of the generated resource. diff --git a/vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go b/vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go index 15ea7178fb..1ed89c01ce 100644 --- a/vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go +++ b/vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go @@ -88,6 +88,9 @@ type HelmChart struct { // ApiVersions is the kubernetes apiversions used for Capabilities.APIVersions ApiVersions []string `json:"apiVersions,omitempty" yaml:"apiVersions,omitempty"` + // KubeVersion is the kubernetes version used by Helm for Capabilities.KubeVersion" + KubeVersion string `json:"kubeVersion,omitempty" yaml:"kubeVersion,omitempty"` + // NameTemplate is for specifying the name template used to name the release. 
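FsSlice.DeepCopy above works with a plain make-and-copy precisely because FieldSpec holds only value types, which is what the new note about pointer members is guarding. A sketch of why that shallow slice copy is effectively deep, using stand-in types:

```go
package main

import "fmt"

// fieldSpec stands in for types.FieldSpec: value fields only, no pointers,
// which is exactly what makes the copy below a true deep copy.
type fieldSpec struct {
	Path               string
	CreateIfNotPresent bool
}

type fsSlice []fieldSpec

// DeepCopy returns a new slice; because the elements hold no pointers,
// copying into a fresh backing array copies everything.
func (s fsSlice) DeepCopy() fsSlice {
	out := make(fsSlice, len(s))
	copy(out, s)
	return out
}

func main() {
	orig := fsSlice{{Path: "metadata/labels", CreateIfNotPresent: true}}
	cp := orig.DeepCopy()
	cp[0].Path = "metadata/annotations"
	fmt.Println(orig[0].Path, cp[0].Path) // metadata/labels metadata/annotations
}
```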
NameTemplate string `json:"nameTemplate,omitempty" yaml:"nameTemplate,omitempty"` @@ -172,6 +175,10 @@ func (h HelmChart) AsHelmArgs(absChartHome string) []string { for _, apiVer := range h.ApiVersions { args = append(args, "--api-versions", apiVer) } + if h.KubeVersion != "" { + args = append(args, "--kube-version", h.KubeVersion) + } + if h.IncludeCRDs { args = append(args, "--include-crds") } diff --git a/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go b/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go index 41376bedd4..f86ec0b9e2 100644 --- a/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go +++ b/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go @@ -188,6 +188,7 @@ const ( deprecatedPatchesJson6902Message = "# Warning: 'patchesJson6902' is deprecated. Please use 'patches' instead." + " " + deprecatedWarningToRunEditFix deprecatedPatchesStrategicMergeMessage = "# Warning: 'patchesStrategicMerge' is deprecated. Please use 'patches' instead." + " " + deprecatedWarningToRunEditFix deprecatedVarsMessage = "# Warning: 'vars' is deprecated. Please use 'replacements' instead." + " " + deprecatedWarningToRunEditFixExperimential + deprecatedCommonLabelsWarningMessage = "# Warning: 'commonLabels' is deprecated. Please use 'labels' instead." + " " + deprecatedWarningToRunEditFix ) // CheckDeprecatedFields check deprecated field is used or not. @@ -196,6 +197,9 @@ func (k *Kustomization) CheckDeprecatedFields() *[]string { if k.Bases != nil { warningMessages = append(warningMessages, deprecatedBaseWarningMessage) } + if k.CommonLabels != nil { + warningMessages = append(warningMessages, deprecatedCommonLabelsWarningMessage) + } if k.ImageTags != nil { warningMessages = append(warningMessages, deprecatedImageTagsWarningMessage) } diff --git a/vendor/sigs.k8s.io/kustomize/api/types/labels.go b/vendor/sigs.k8s.io/kustomize/api/types/labels.go index 05ba890f9d..35f7fb2a5b 100644 --- a/vendor/sigs.k8s.io/kustomize/api/types/labels.go +++ b/vendor/sigs.k8s.io/kustomize/api/types/labels.go @@ -6,12 +6,12 @@ package types type Label struct { // Pairs contains the key-value pairs for labels to add Pairs map[string]string `json:"pairs,omitempty" yaml:"pairs,omitempty"` - // IncludeSelectors inidicates should transformer include the + // IncludeSelectors indicates whether the transformer should include the // fieldSpecs for selectors. Custom fieldSpecs specified by // FieldSpecs will be merged with builtin fieldSpecs if this // is true. IncludeSelectors bool `json:"includeSelectors,omitempty" yaml:"includeSelectors,omitempty"` - // IncludeTemplates inidicates should transformer include the + // IncludeTemplates indicates whether the transformer should include the // spec/template/metadata fieldSpec. Custom fieldSpecs specified by // FieldSpecs will be merged with spec/template/metadata fieldSpec if this // is true. If IncludeSelectors is true, IncludeTemplates is not needed. diff --git a/vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go b/vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go index 741e5debc4..aa511ae79c 100644 --- a/vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go +++ b/vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go @@ -4,8 +4,10 @@ package types type HelmConfig struct { - Enabled bool - Command string + Enabled bool + Command string + ApiVersions []string + KubeVersion string } // PluginConfig holds plugin configuration. 
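AsHelmArgs above emits `--kube-version <value>` only when KubeVersion is set, alongside the existing --api-versions and --include-crds handling. A stripped-down sketch of that flag assembly with a stand-in chart struct:

```go
package main

import "fmt"

// helmChart is a stripped-down stand-in for types.HelmChart with just the
// fields needed to show the flag assembly.
type helmChart struct {
	ApiVersions []string
	KubeVersion string
	IncludeCRDs bool
}

// asHelmArgs mirrors how optional settings become `helm template` flags:
// nothing is emitted for empty values.
func (h helmChart) asHelmArgs() []string {
	args := []string{"template"}
	for _, v := range h.ApiVersions {
		args = append(args, "--api-versions", v)
	}
	if h.KubeVersion != "" {
		args = append(args, "--kube-version", h.KubeVersion)
	}
	if h.IncludeCRDs {
		args = append(args, "--include-crds")
	}
	return args
}

func main() {
	fmt.Println(helmChart{KubeVersion: "1.27.0", IncludeCRDs: true}.asHelmArgs())
	// [template --kube-version 1.27.0 --include-crds]
}
```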
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go index eb5396d854..41ee0afc5a 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go +++ b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go @@ -44,6 +44,16 @@ var ( customSchema []byte //nolint:gochecknoglobals ) +// schemaParseStatus is used in cases when a schema should be parsed, but the +// parsing may be delayed to a later time. +type schemaParseStatus uint32 + +const ( + schemaNotParsed schemaParseStatus = iota + schemaParseDelayed + schemaParsed +) + // openapiData contains the parsed openapi state. this is in a struct rather than // a list of vars so that it can be reset from tests. type openapiData struct { @@ -57,13 +67,17 @@ type openapiData struct { // is namespaceable or not namespaceabilityByResourceType map[yaml.TypeMeta]bool - // noUseBuiltInSchema stores whether we want to prevent using the built-n + // noUseBuiltInSchema stores whether we want to prevent using the built-in // Kubernetes schema as part of the global schema noUseBuiltInSchema bool // schemaInit stores whether or not we've parsed the schema already, // so that we only reparse the when necessary (to speed up performance) schemaInit bool + + // defaultBuiltInSchemaParseStatus stores the parse status of the default + // built-in schema. + defaultBuiltInSchemaParseStatus schemaParseStatus } type format string @@ -387,18 +401,45 @@ func GetSchema(s string, schema *spec.Schema) (*ResourceSchema, error) { // be true if the resource is namespace-scoped, and false if the type is // cluster-scoped. func IsNamespaceScoped(typeMeta yaml.TypeMeta) (bool, bool) { - if res, f := precomputedIsNamespaceScoped[typeMeta]; f { - return res, true + if isNamespaceScoped, found := precomputedIsNamespaceScoped[typeMeta]; found { + return isNamespaceScoped, found + } + if isInitSchemaNeededForNamespaceScopeCheck() { + initSchema() } - return isNamespaceScopedFromSchema(typeMeta) -} - -func isNamespaceScopedFromSchema(typeMeta yaml.TypeMeta) (bool, bool) { - initSchema() isNamespaceScoped, found := globalSchema.namespaceabilityByResourceType[typeMeta] return isNamespaceScoped, found } +// isInitSchemaNeededForNamespaceScopeCheck returns true if initSchema is needed +// to ensure globalSchema.namespaceabilityByResourceType is fully populated for +// cases where a custom or non-default built-in schema is in use. +func isInitSchemaNeededForNamespaceScopeCheck() bool { + schemaLock.Lock() + defer schemaLock.Unlock() + + if globalSchema.schemaInit { + return false // globalSchema already is initialized. + } + if customSchema != nil { + return true // initSchema is needed. + } + if kubernetesOpenAPIVersion == "" || kubernetesOpenAPIVersion == kubernetesOpenAPIDefaultVersion { + // The default built-in schema is in use. Since + // precomputedIsNamespaceScoped aligns with the default built-in schema + // (verified by TestIsNamespaceScopedPrecompute), there is no need to + // call initSchema. + if globalSchema.defaultBuiltInSchemaParseStatus == schemaNotParsed { + // The schema may be needed for purposes other than namespace scope + // checks. Flag it to be parsed when that need arises. + globalSchema.defaultBuiltInSchemaParseStatus = schemaParseDelayed + } + return false + } + // A non-default built-in schema is in use. initSchema is needed. + return true +} + // IsCertainlyClusterScoped returns true for Node, Namespace, etc. and // false for Pod, Deployment, etc. 
and kinds that aren't recognized in the // openapi data. See: @@ -638,13 +679,19 @@ func initSchema() { panic(fmt.Errorf("invalid schema file: %w", err)) } } else { - if kubernetesOpenAPIVersion == "" { + if kubernetesOpenAPIVersion == "" || kubernetesOpenAPIVersion == kubernetesOpenAPIDefaultVersion { parseBuiltinSchema(kubernetesOpenAPIDefaultVersion) + globalSchema.defaultBuiltInSchemaParseStatus = schemaParsed } else { parseBuiltinSchema(kubernetesOpenAPIVersion) } } + if globalSchema.defaultBuiltInSchemaParseStatus == schemaParseDelayed { + parseBuiltinSchema(kubernetesOpenAPIDefaultVersion) + globalSchema.defaultBuiltInSchemaParseStatus = schemaParsed + } + if err := parse(kustomizationapi.MustAsset(kustomizationAPIAssetName), JsonOrYaml); err != nil { // this should never happen panic(err) diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go index 5f45424d90..12c72ae83d 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go @@ -7,7 +7,7 @@ import ( "bytes" "io" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" + yaml "sigs.k8s.io/yaml/goyaml.v3" ) const ( diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go index a471140000..55709322a1 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go @@ -7,9 +7,9 @@ import ( "reflect" "strings" - y1_1 "gopkg.in/yaml.v2" "k8s.io/kube-openapi/pkg/validation/spec" - y1_2 "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" + y1_1 "sigs.k8s.io/yaml/goyaml.v2" + y1_2 "sigs.k8s.io/yaml/goyaml.v3" ) // typeToTag maps OpenAPI schema types to yaml 1.2 tags @@ -47,7 +47,7 @@ func FormatNonStringStyle(node *Node, schema spec.Schema) { } // if the node tag is null, make sure we don't add any non-null tags - // https://github.com/GoogleContainerTools/kpt/issues/2321 + // https://github.com/kptdev/kpt/issues/2321 if node.Tag == NodeTagNull { // must NOT quote null values node.Style = 0 diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go index ae63d258b8..e0802a897f 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go @@ -11,7 +11,7 @@ import ( "github.com/davecgh/go-spew/spew" "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" + yaml "sigs.k8s.io/yaml/goyaml.v3" ) // Append creates an ElementAppender @@ -690,6 +690,10 @@ type FieldSetter struct { // when setting it. Otherwise, if an existing node is found, the style is // retained. OverrideStyle bool `yaml:"overrideStyle,omitempty"` + + // AppendKeyStyle defines the style of the key when no existing node is + // found, and a new node is appended. + AppendKeyStyle Style `yaml:"appendKeyStyle,omitempty"` } func (s FieldSetter) Filter(rn *RNode) (*RNode, error) { @@ -720,8 +724,14 @@ func (s FieldSetter) Filter(rn *RNode) (*RNode, error) { return rn, nil } - // Clear the field if it is empty, or explicitly null - if s.Value == nil || s.Value.IsTaggedNull() { + // Clearing nil fields: + // 1. Clear any fields with no value + // 2. Clear any "null" YAML fields unless we explicitly want to keep them + // This is to balance + // 1. Persisting 'null' values passed by the user (see issue #4628) + // 2. 
Avoiding producing noisy documents that add any field defaulting to + // 'nil' even if they weren't present in the source document + if s.Value == nil || (s.Value.IsTaggedNull() && !s.Value.ShouldKeep) { return rn.Pipe(Clear(s.Name)) } @@ -747,6 +757,7 @@ func (s FieldSetter) Filter(rn *RNode) (*RNode, error) { &yaml.Node{ Kind: yaml.ScalarNode, Value: s.Name, + Style: s.AppendKeyStyle, HeadComment: s.Comments.HeadComment, LineComment: s.Comments.LineComment, FootComment: s.Comments.FootComment, diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go index 2ae8c1665e..a7d9016727 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go @@ -5,7 +5,7 @@ package yaml import ( "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" + yaml "sigs.k8s.io/yaml/goyaml.v3" ) // AnnotationClearer removes an annotation at metadata.annotations. diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go index 1749d38722..8e40d4c2b2 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go @@ -10,7 +10,7 @@ import ( "strings" "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" + yaml "sigs.k8s.io/yaml/goyaml.v3" ) // PathMatcher returns all RNodes matching the path wrapped in a SequenceNode. diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go index 8b328ab9d6..3b23019e1a 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go @@ -45,9 +45,9 @@ func MergeStrings(srcStr, destStr string, infer bool, mergeOptions yaml.MergeOpt } type Merger struct { - // for forwards compatibility when new functions are added to the interface } +// for forwards compatibility when new functions are added to the interface var _ walk.Visitor = Merger{} func (m Merger) VisitMap(nodes walk.Sources, s *openapi.ResourceSchema) (*yaml.RNode, error) { @@ -66,8 +66,7 @@ func (m Merger) VisitMap(nodes walk.Sources, s *openapi.ResourceSchema) (*yaml.R // If Origin is missing, preserve explicitly set null in Dest ("null", "~", etc) if nodes.Origin().IsNil() && !nodes.Dest().IsNil() && len(nodes.Dest().YNode().Value) > 0 { - // Return a new node so that it won't have a "!!null" tag and therefore won't be cleared. - return yaml.NewScalarRNode(nodes.Dest().YNode().Value), nil + return yaml.MakePersistentNullNode(nodes.Dest().YNode().Value), nil } return nodes.Origin(), nil diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go index 7406c525e0..07c782d730 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go @@ -13,10 +13,10 @@ import ( "strings" "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" "sigs.k8s.io/kustomize/kyaml/sliceutil" "sigs.k8s.io/kustomize/kyaml/utils" "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels" + yaml "sigs.k8s.io/yaml/goyaml.v3" ) // MakeNullNode returns an RNode that represents an empty document. 
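The FieldSetter and merge2 changes above introduce a "persistent" null: a node tagged null but flagged ShouldKeep, so ordinary nulls are still cleared while explicitly written ones survive a merge. A simplified, self-contained sketch of that flag-based decision (not the kyaml API itself):

```go
package main

import "fmt"

// node is a simplified stand-in for an RNode: a YAML tag, a raw value, and the
// ShouldKeep flag added in the diff above.
type node struct {
	Tag        string
	Value      string
	ShouldKeep bool
}

func (n *node) isTaggedNull() bool { return n != nil && n.Tag == "!!null" }

// setField mimics the FieldSetter decision: clear the field when the value is
// missing or an ordinary null, but keep an explicit null the user asked for.
func setField(doc map[string]string, name string, v *node) {
	if v == nil || (v.isTaggedNull() && !v.ShouldKeep) {
		delete(doc, name) // clear the field
		return
	}
	doc[name] = v.Value
}

func main() {
	doc := map[string]string{"a": "1", "b": "2", "c": "3"}
	setField(doc, "a", nil)                                                // cleared: no value
	setField(doc, "b", &node{Tag: "!!null", Value: "null"})                // cleared: ordinary null
	setField(doc, "c", &node{Tag: "!!null", Value: "~", ShouldKeep: true}) // kept: explicit null
	fmt.Println(doc) // map[c:~]
}
```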
@@ -24,6 +24,20 @@ func MakeNullNode() *RNode { return NewRNode(&Node{Tag: NodeTagNull}) } +// MakePersistentNullNode returns an RNode that should be persisted, +// even when merging +func MakePersistentNullNode(value string) *RNode { + n := NewRNode( + &Node{ + Tag: NodeTagNull, + Value: value, + Kind: yaml.ScalarNode, + }, + ) + n.ShouldKeep = true + return n +} + // IsMissingOrNull is true if the RNode is nil or explicitly tagged null. // TODO: make this a method on RNode. func IsMissingOrNull(node *RNode) bool { @@ -214,6 +228,9 @@ type RNode struct { // object root: object root value *yaml.Node + // Whether we should keep this node, even if otherwise we would clear it + ShouldKeep bool + Match []string } diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go index 897e2edaaf..73f5d8406d 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go @@ -8,8 +8,8 @@ import ( "strings" "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" "sigs.k8s.io/kustomize/kyaml/sets" + yaml "sigs.k8s.io/yaml/goyaml.v3" ) // CopyYNode returns a distinct copy of its argument. diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go index 998af6d32f..afeec0a5a7 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go @@ -58,7 +58,7 @@ func (l Walker) walkMap() (*yaml.RNode, error) { if l.Schema != nil { s = l.Schema.Field(key) } - fv, commentSch := l.fieldValue(key) + fv, commentSch, keyStyles := l.fieldValue(key) if commentSch != nil { s = commentSch } @@ -90,7 +90,13 @@ func (l Walker) walkMap() (*yaml.RNode, error) { } // this handles empty and non-empty values - _, err = dest.Pipe(yaml.FieldSetter{Name: key, Comments: comments, Value: val}) + fieldSetter := yaml.FieldSetter{ + Name: key, + Comments: comments, + AppendKeyStyle: keyStyles[val], + Value: val, + } + _, err = dest.Pipe(fieldSetter) if err != nil { return nil, err } @@ -153,10 +159,12 @@ func (l Walker) fieldNames() []string { return result } -// fieldValue returns a slice containing each source's value for fieldName -func (l Walker) fieldValue(fieldName string) ([]*yaml.RNode, *openapi.ResourceSchema) { +// fieldValue returns a slice containing each source's value for fieldName, the +// schema, and a map of each source's value to the style for the source's key. 
+func (l Walker) fieldValue(fieldName string) ([]*yaml.RNode, *openapi.ResourceSchema, map[*yaml.RNode]yaml.Style) { var fields []*yaml.RNode var sch *openapi.ResourceSchema + keyStyles := make(map[*yaml.RNode]yaml.Style, len(l.Sources)) for i := range l.Sources { if l.Sources[i] == nil { fields = append(fields, nil) @@ -165,9 +173,12 @@ func (l Walker) fieldValue(fieldName string) ([]*yaml.RNode, *openapi.ResourceSc field := l.Sources[i].Field(fieldName) f, s := l.valueIfPresent(field) fields = append(fields, f) + if field != nil && field.Key != nil && field.Key.YNode() != nil { + keyStyles[f] = field.Key.YNode().Style + } if sch == nil && !s.IsMissingOrNull() { sch = s } } - return fields, sch + return fields, sch, keyStyles } diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/goyaml.v3/LICENSE similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE rename to vendor/sigs.k8s.io/yaml/goyaml.v3/LICENSE diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE b/vendor/sigs.k8s.io/yaml/goyaml.v3/NOTICE similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE rename to vendor/sigs.k8s.io/yaml/goyaml.v3/NOTICE diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS new file mode 100644 index 0000000000..73be0a3a9b --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS @@ -0,0 +1,24 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dims +- jpbetz +- smarterclayton +- deads2k +- sttts +- liggitt +- natasha41575 +- knverey +reviewers: +- dims +- thockin +- jpbetz +- smarterclayton +- deads2k +- derekwaynecarr +- mikedanese +- liggitt +- sttts +- tallclair +labels: +- sig/api-machinery diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/README.md b/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md similarity index 87% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/README.md rename to vendor/sigs.k8s.io/yaml/goyaml.v3/README.md index 08eb1babdd..b1a6b2e9e2 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/README.md +++ b/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md @@ -1,3 +1,13 @@ +# go-yaml fork + +This package is a fork of the go-yaml library and is intended solely for consumption +by kubernetes projects. In this fork, we plan to support only critical changes required for +kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests +should be made in the upstream go-yaml library, and we will reject such changes in this fork +unless we are pulling them from upstream. + +This fork is based on v3.0.1: https://github.com/go-yaml/yaml/releases/tag/v3.0.1. 
+ # YAML support for the Go language Introduction diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/apic.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/apic.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/decode.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/decode.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/decode.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/decode.go diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/emitterc.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go similarity index 95% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/emitterc.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go index f0f3d1867f..6ea0ae8c10 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/emitterc.go +++ b/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go @@ -226,7 +226,7 @@ func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_ } // Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool { +func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool { emitter.indents = append(emitter.indents, emitter.indent) if emitter.indent < 0 { if flow { @@ -241,10 +241,14 @@ func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool emitter.indent += 2 } else { // Everything else aligns to the chosen indentation. - emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) - } - if compact_seq { - emitter.indent = emitter.indent - 2 + emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent) + if compact_seq { + // The value compact_seq passed in is almost always set to `false` when this function is called, + // except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we + // are increasing the indent to account for sequence nodes, which will be correct because we need to + // subtract 2 to account for the - at the beginning of the sequence node. 
+ emitter.indent = emitter.indent - 2 + } } } return true @@ -491,7 +495,7 @@ func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_eve if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { return false } - if !yaml_emitter_process_line_comment(emitter, false) { + if !yaml_emitter_process_line_comment(emitter) { return false } if !yaml_emitter_process_foot_comment(emitter) { @@ -537,7 +541,7 @@ func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_e if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { return false } - if !yaml_emitter_increase_indent(emitter, true, false, false) { + if !yaml_emitter_increase_indent(emitter, true, false) { return false } emitter.flow_level++ @@ -560,7 +564,7 @@ func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_e if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { return false } - if !yaml_emitter_process_line_comment(emitter, false) { + if !yaml_emitter_process_line_comment(emitter) { return false } if !yaml_emitter_process_foot_comment(emitter) { @@ -605,7 +609,7 @@ func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_e return false } } - if !yaml_emitter_process_line_comment(emitter, false) { + if !yaml_emitter_process_line_comment(emitter) { return false } if !yaml_emitter_process_foot_comment(emitter) { @@ -620,7 +624,7 @@ func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_eve if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { return false } - if !yaml_emitter_increase_indent(emitter, true, false, false) { + if !yaml_emitter_increase_indent(emitter, true, false) { return false } emitter.flow_level++ @@ -646,7 +650,7 @@ func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_eve if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { return false } - if !yaml_emitter_process_line_comment(emitter, false) { + if !yaml_emitter_process_line_comment(emitter) { return false } if !yaml_emitter_process_foot_comment(emitter) { @@ -719,7 +723,7 @@ func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_e return false } } - if !yaml_emitter_process_line_comment(emitter, false) { + if !yaml_emitter_process_line_comment(emitter) { return false } if !yaml_emitter_process_foot_comment(emitter) { @@ -731,9 +735,16 @@ func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_e // Expect a block item node. func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { + // emitter.mapping context tells us if we are currently in a mapping context. + // emiiter.column tells us which column we are in in the yaml output. 0 is the first char of the column. + // emitter.indentation tells us if the last character was an indentation character. + // emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements. + // So, `seq` means that we are in a mapping context, and we are either at the first char of the column or + // the last character was not an indentation character, and we consider '- ' part of the indentation + // for sequence elements. 
seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) && emitter.compact_sequence_indent - if !yaml_emitter_increase_indent(emitter, false, false, seq){ + if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) { return false } } @@ -757,7 +768,7 @@ func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { return false } - if !yaml_emitter_process_line_comment(emitter, false) { + if !yaml_emitter_process_line_comment(emitter) { return false } if !yaml_emitter_process_foot_comment(emitter) { @@ -769,7 +780,7 @@ func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_ // Expect a block key node. func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { - if !yaml_emitter_increase_indent(emitter, false, false, false) { + if !yaml_emitter_increase_indent(emitter, false, false) { return false } } @@ -833,7 +844,7 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { // An indented block follows, so write the comment right now. emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment - if !yaml_emitter_process_line_comment(emitter, false) { + if !yaml_emitter_process_line_comment(emitter) { return false } emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment @@ -843,7 +854,7 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { return false } - if !yaml_emitter_process_line_comment(emitter, false) { + if !yaml_emitter_process_line_comment(emitter) { return false } if !yaml_emitter_process_foot_comment(emitter) { @@ -901,7 +912,7 @@ func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool if !yaml_emitter_process_tag(emitter) { return false } - if !yaml_emitter_increase_indent(emitter, true, false, false) { + if !yaml_emitter_increase_indent(emitter, true, false) { return false } if !yaml_emitter_process_scalar(emitter) { @@ -1149,8 +1160,12 @@ func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { } // Write an line comment. -func yaml_emitter_process_line_comment(emitter *yaml_emitter_t, linebreak bool) bool { +func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool { if len(emitter.line_comment) == 0 { + // The next 3 lines are needed to resolve an issue with leading newlines + // See https://github.com/go-yaml/yaml/issues/755 + // When linebreak is set to true, put_break will be called and will add + // the needed newline. 
if linebreak && !put_break(emitter) { return false } @@ -1902,7 +1917,7 @@ func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - if !yaml_emitter_process_line_comment(emitter, true) { + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { return false } //emitter.indention = true @@ -1939,7 +1954,7 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - if !yaml_emitter_process_line_comment(emitter, true) { + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { return false } diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/encode.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/encode.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/encode.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/encode.go diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/parserc.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/parserc.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go new file mode 100644 index 0000000000..b98c3321ed --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go @@ -0,0 +1,39 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// CompactSeqIndent makes it so that '- ' is considered part of the indentation. +func (e *Encoder) CompactSeqIndent() { + e.encoder.emitter.compact_sequence_indent = true +} + +// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation. 
+func (e *Encoder) DefaultSeqIndent() { + e.encoder.emitter.compact_sequence_indent = false +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/readerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/readerc.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/resolve.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/resolve.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/resolve.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/resolve.go diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/scannerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/scannerc.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/sorter.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/sorter.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/sorter.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/sorter.go diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/writerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/writerc.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go similarity index 98% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yaml.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go index bb6418dba3..8cec6da48d 100644 --- a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yaml.go +++ b/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go @@ -278,16 +278,6 @@ func (e *Encoder) SetIndent(spaces int) { e.encoder.indent = spaces } -// CompactSeqIndent makes it so that '- ' is considered part of the indentation. -func (e *Encoder) CompactSeqIndent() { - e.encoder.emitter.compact_sequence_indent = true -} - -// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation. -func (e *Encoder) DefaultSeqIndent() { - e.encoder.emitter.compact_sequence_indent = false -} - // Close closes the encoder by writing any remaining data. // It does not write a stream terminating string "...". 
func (e *Encoder) Close() (err error) { diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlh.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlh.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlprivateh.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go similarity index 100% rename from vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlprivateh.go rename to vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go
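With the kyaml YAML dependency now pointing at sigs.k8s.io/yaml/goyaml.v3, the CompactSeqIndent and DefaultSeqIndent methods shown in patch.go control whether `- ` counts as part of the indentation when encoding. A short usage sketch, assuming the vendored module above is available on the module path:

```go
package main

import (
	"os"

	yaml "sigs.k8s.io/yaml/goyaml.v3"
)

func main() {
	doc := map[string][]string{
		"resources": {"deployment.yaml", "service.yaml"},
	}

	enc := yaml.NewEncoder(os.Stdout)
	enc.CompactSeqIndent() // treat '- ' as indentation, kustomize-style:
	//   resources:
	//   - deployment.yaml
	//   - service.yaml
	if err := enc.Encode(doc); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
}
```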